]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.2.11-201203131840.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.2.11-201203131840.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0aa3907 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,7 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 +lib1funcs.S
105 logo_*.c
106 logo_*_clut224.c
107 logo_*_mono.c
108 @@ -166,14 +180,15 @@ machtypes.h
109 map
110 map_hugetlb
111 maui_boot.h
112 -media
113 mconf
114 +mdp
115 miboot*
116 mk_elfconfig
117 mkboot
118 mkbugboot
119 mkcpustr
120 mkdep
121 +mkpiggy
122 mkprep
123 mkregtable
124 mktables
125 @@ -209,6 +224,7 @@ r300_reg_safe.h
126 r420_reg_safe.h
127 r600_reg_safe.h
128 recordmcount
129 +regdb.c
130 relocs
131 rlim_names.h
132 rn50_reg_safe.h
133 @@ -219,6 +235,7 @@ setup
134 setup.bin
135 setup.elf
136 sImage
137 +slabinfo
138 sm_tbl*
139 split-include
140 syscalltab.h
141 @@ -229,6 +246,7 @@ tftpboot.img
142 timeconst.h
143 times.h*
144 trix_boot.h
145 +user_constants.h
146 utsrelease.h*
147 vdso-syms.lds
148 vdso.lds
149 @@ -246,7 +264,9 @@ vmlinux
150 vmlinux-*
151 vmlinux.aout
152 vmlinux.bin.all
153 +vmlinux.bin.bz2
154 vmlinux.lds
155 +vmlinux.relocs
156 vmlinuz
157 voffset.h
158 vsyscall.lds
159 @@ -254,9 +274,11 @@ vsyscall_32.lds
160 wanxlfw.inc
161 uImage
162 unifdef
163 +utsrelease.h
164 wakeup.bin
165 wakeup.elf
166 wakeup.lds
167 zImage*
168 zconf.hash.c
169 +zconf.lex.c
170 zoffset.h
171 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
172 index 81c287f..d456d02 100644
173 --- a/Documentation/kernel-parameters.txt
174 +++ b/Documentation/kernel-parameters.txt
175 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
176 the specified number of seconds. This is to be used if
177 your oopses keep scrolling off the screen.
178
179 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
180 + virtualization environments that don't cope well with the
181 + expand down segment used by UDEREF on X86-32 or the frequent
182 + page table updates on X86-64.
183 +
184 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
185 +
186 pcbit= [HW,ISDN]
187
188 pcd. [PARIDE]
189 diff --git a/Makefile b/Makefile
190 index 4b76371..53aa79c 100644
191 --- a/Makefile
192 +++ b/Makefile
193 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
194
195 HOSTCC = gcc
196 HOSTCXX = g++
197 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
198 -HOSTCXXFLAGS = -O2
199 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
200 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
201 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
202
203 # Decide whether to build built-in, modular, or both.
204 # Normally, just do built-in.
205 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
206 # Rules shared between *config targets and build targets
207
208 # Basic helpers built in scripts/
209 -PHONY += scripts_basic
210 -scripts_basic:
211 +PHONY += scripts_basic gcc-plugins
212 +scripts_basic: gcc-plugins
213 $(Q)$(MAKE) $(build)=scripts/basic
214 $(Q)rm -f .tmp_quiet_recordmcount
215
216 @@ -564,6 +565,50 @@ else
217 KBUILD_CFLAGS += -O2
218 endif
219
220 +ifndef DISABLE_PAX_PLUGINS
221 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
222 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
223 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
224 +endif
225 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
226 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
227 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
228 +endif
229 +ifdef CONFIG_KALLOCSTAT_PLUGIN
230 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
231 +endif
232 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
233 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
234 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
235 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
236 +endif
237 +ifdef CONFIG_CHECKER_PLUGIN
238 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
239 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
240 +endif
241 +endif
242 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
243 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
244 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
245 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
246 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
247 +ifeq ($(KBUILD_EXTMOD),)
248 +gcc-plugins:
249 + $(Q)$(MAKE) $(build)=tools/gcc
250 +else
251 +gcc-plugins: ;
252 +endif
253 +else
254 +gcc-plugins:
255 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
256 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
257 +else
258 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
259 +endif
260 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
261 +endif
262 +endif
263 +
264 include $(srctree)/arch/$(SRCARCH)/Makefile
265
266 ifneq ($(CONFIG_FRAME_WARN),0)
267 @@ -708,7 +753,7 @@ export mod_strip_cmd
268
269
270 ifeq ($(KBUILD_EXTMOD),)
271 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
272 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
273
274 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
275 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
276 @@ -932,6 +977,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
277
278 # The actual objects are generated when descending,
279 # make sure no implicit rule kicks in
280 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
281 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
282 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
283
284 # Handle descending into subdirectories listed in $(vmlinux-dirs)
285 @@ -941,7 +988,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
286 # Error messages still appears in the original language
287
288 PHONY += $(vmlinux-dirs)
289 -$(vmlinux-dirs): prepare scripts
290 +$(vmlinux-dirs): gcc-plugins prepare scripts
291 $(Q)$(MAKE) $(build)=$@
292
293 # Store (new) KERNELRELASE string in include/config/kernel.release
294 @@ -985,6 +1032,7 @@ prepare0: archprepare FORCE
295 $(Q)$(MAKE) $(build)=.
296
297 # All the preparing..
298 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
299 prepare: prepare0
300
301 # Generate some files
302 @@ -1086,6 +1134,8 @@ all: modules
303 # using awk while concatenating to the final file.
304
305 PHONY += modules
306 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
307 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
308 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
309 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
310 @$(kecho) ' Building modules, stage 2.';
311 @@ -1101,7 +1151,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
312
313 # Target to prepare building external modules
314 PHONY += modules_prepare
315 -modules_prepare: prepare scripts
316 +modules_prepare: gcc-plugins prepare scripts
317
318 # Target to install modules
319 PHONY += modules_install
320 @@ -1198,6 +1248,7 @@ distclean: mrproper
321 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
322 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
323 -o -name '.*.rej' \
324 + -o -name '.*.rej' -o -name '*.so' \
325 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
326 -type f -print | xargs rm -f
327
328 @@ -1358,6 +1409,8 @@ PHONY += $(module-dirs) modules
329 $(module-dirs): crmodverdir $(objtree)/Module.symvers
330 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
331
332 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
333 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
334 modules: $(module-dirs)
335 @$(kecho) ' Building modules, stage 2.';
336 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
337 @@ -1484,17 +1537,21 @@ else
338 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
339 endif
340
341 -%.s: %.c prepare scripts FORCE
342 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
343 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
344 +%.s: %.c gcc-plugins prepare scripts FORCE
345 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
346 %.i: %.c prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348 -%.o: %.c prepare scripts FORCE
349 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
350 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
351 +%.o: %.c gcc-plugins prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353 %.lst: %.c prepare scripts FORCE
354 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
355 -%.s: %.S prepare scripts FORCE
356 +%.s: %.S gcc-plugins prepare scripts FORCE
357 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
358 -%.o: %.S prepare scripts FORCE
359 +%.o: %.S gcc-plugins prepare scripts FORCE
360 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
361 %.symtypes: %.c prepare scripts FORCE
362 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
363 @@ -1504,11 +1561,15 @@ endif
364 $(cmd_crmodverdir)
365 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
366 $(build)=$(build-dir)
367 -%/: prepare scripts FORCE
368 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
369 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
370 +%/: gcc-plugins prepare scripts FORCE
371 $(cmd_crmodverdir)
372 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
373 $(build)=$(build-dir)
374 -%.ko: prepare scripts FORCE
375 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
376 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
377 +%.ko: gcc-plugins prepare scripts FORCE
378 $(cmd_crmodverdir)
379 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
380 $(build)=$(build-dir) $(@:.ko=.o)
381 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
382 index 640f909..48b6597 100644
383 --- a/arch/alpha/include/asm/atomic.h
384 +++ b/arch/alpha/include/asm/atomic.h
385 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
386 #define atomic_dec(v) atomic_sub(1,(v))
387 #define atomic64_dec(v) atomic64_sub(1,(v))
388
389 +#define atomic64_read_unchecked(v) atomic64_read(v)
390 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
391 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
392 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
393 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
394 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
395 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
396 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
397 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
398 +
399 #define smp_mb__before_atomic_dec() smp_mb()
400 #define smp_mb__after_atomic_dec() smp_mb()
401 #define smp_mb__before_atomic_inc() smp_mb()
402 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
403 index ad368a9..fbe0f25 100644
404 --- a/arch/alpha/include/asm/cache.h
405 +++ b/arch/alpha/include/asm/cache.h
406 @@ -4,19 +4,19 @@
407 #ifndef __ARCH_ALPHA_CACHE_H
408 #define __ARCH_ALPHA_CACHE_H
409
410 +#include <linux/const.h>
411
412 /* Bytes per L1 (data) cache line. */
413 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
414 -# define L1_CACHE_BYTES 64
415 # define L1_CACHE_SHIFT 6
416 #else
417 /* Both EV4 and EV5 are write-through, read-allocate,
418 direct-mapped, physical.
419 */
420 -# define L1_CACHE_BYTES 32
421 # define L1_CACHE_SHIFT 5
422 #endif
423
424 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
425 #define SMP_CACHE_BYTES L1_CACHE_BYTES
426
427 #endif
428 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
429 index da5449e..7418343 100644
430 --- a/arch/alpha/include/asm/elf.h
431 +++ b/arch/alpha/include/asm/elf.h
432 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
433
434 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
435
436 +#ifdef CONFIG_PAX_ASLR
437 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
438 +
439 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
440 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
441 +#endif
442 +
443 /* $0 is set by ld.so to a pointer to a function which might be
444 registered using atexit. This provides a mean for the dynamic
445 linker to call DT_FINI functions for shared libraries that have
446 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
447 index de98a73..bd4f1f8 100644
448 --- a/arch/alpha/include/asm/pgtable.h
449 +++ b/arch/alpha/include/asm/pgtable.h
450 @@ -101,6 +101,17 @@ struct vm_area_struct;
451 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
452 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
453 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
454 +
455 +#ifdef CONFIG_PAX_PAGEEXEC
456 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
457 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
458 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
459 +#else
460 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
461 +# define PAGE_COPY_NOEXEC PAGE_COPY
462 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
463 +#endif
464 +
465 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
466
467 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
468 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
469 index 2fd00b7..cfd5069 100644
470 --- a/arch/alpha/kernel/module.c
471 +++ b/arch/alpha/kernel/module.c
472 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
473
474 /* The small sections were sorted to the end of the segment.
475 The following should definitely cover them. */
476 - gp = (u64)me->module_core + me->core_size - 0x8000;
477 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
478 got = sechdrs[me->arch.gotsecindex].sh_addr;
479
480 for (i = 0; i < n; i++) {
481 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
482 index 01e8715..be0e80f 100644
483 --- a/arch/alpha/kernel/osf_sys.c
484 +++ b/arch/alpha/kernel/osf_sys.c
485 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
486 /* At this point: (!vma || addr < vma->vm_end). */
487 if (limit - len < addr)
488 return -ENOMEM;
489 - if (!vma || addr + len <= vma->vm_start)
490 + if (check_heap_stack_gap(vma, addr, len))
491 return addr;
492 addr = vma->vm_end;
493 vma = vma->vm_next;
494 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
495 merely specific addresses, but regions of memory -- perhaps
496 this feature should be incorporated into all ports? */
497
498 +#ifdef CONFIG_PAX_RANDMMAP
499 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
500 +#endif
501 +
502 if (addr) {
503 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
504 if (addr != (unsigned long) -ENOMEM)
505 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
506 }
507
508 /* Next, try allocating at TASK_UNMAPPED_BASE. */
509 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
510 - len, limit);
511 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
512 +
513 if (addr != (unsigned long) -ENOMEM)
514 return addr;
515
516 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
517 index fadd5f8..904e73a 100644
518 --- a/arch/alpha/mm/fault.c
519 +++ b/arch/alpha/mm/fault.c
520 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
521 __reload_thread(pcb);
522 }
523
524 +#ifdef CONFIG_PAX_PAGEEXEC
525 +/*
526 + * PaX: decide what to do with offenders (regs->pc = fault address)
527 + *
528 + * returns 1 when task should be killed
529 + * 2 when patched PLT trampoline was detected
530 + * 3 when unpatched PLT trampoline was detected
531 + */
532 +static int pax_handle_fetch_fault(struct pt_regs *regs)
533 +{
534 +
535 +#ifdef CONFIG_PAX_EMUPLT
536 + int err;
537 +
538 + do { /* PaX: patched PLT emulation #1 */
539 + unsigned int ldah, ldq, jmp;
540 +
541 + err = get_user(ldah, (unsigned int *)regs->pc);
542 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
543 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
544 +
545 + if (err)
546 + break;
547 +
548 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
549 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
550 + jmp == 0x6BFB0000U)
551 + {
552 + unsigned long r27, addr;
553 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
554 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
555 +
556 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
557 + err = get_user(r27, (unsigned long *)addr);
558 + if (err)
559 + break;
560 +
561 + regs->r27 = r27;
562 + regs->pc = r27;
563 + return 2;
564 + }
565 + } while (0);
566 +
567 + do { /* PaX: patched PLT emulation #2 */
568 + unsigned int ldah, lda, br;
569 +
570 + err = get_user(ldah, (unsigned int *)regs->pc);
571 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
572 + err |= get_user(br, (unsigned int *)(regs->pc+8));
573 +
574 + if (err)
575 + break;
576 +
577 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
578 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
579 + (br & 0xFFE00000U) == 0xC3E00000U)
580 + {
581 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
582 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
583 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
584 +
585 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
586 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
587 + return 2;
588 + }
589 + } while (0);
590 +
591 + do { /* PaX: unpatched PLT emulation */
592 + unsigned int br;
593 +
594 + err = get_user(br, (unsigned int *)regs->pc);
595 +
596 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
597 + unsigned int br2, ldq, nop, jmp;
598 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
599 +
600 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
601 + err = get_user(br2, (unsigned int *)addr);
602 + err |= get_user(ldq, (unsigned int *)(addr+4));
603 + err |= get_user(nop, (unsigned int *)(addr+8));
604 + err |= get_user(jmp, (unsigned int *)(addr+12));
605 + err |= get_user(resolver, (unsigned long *)(addr+16));
606 +
607 + if (err)
608 + break;
609 +
610 + if (br2 == 0xC3600000U &&
611 + ldq == 0xA77B000CU &&
612 + nop == 0x47FF041FU &&
613 + jmp == 0x6B7B0000U)
614 + {
615 + regs->r28 = regs->pc+4;
616 + regs->r27 = addr+16;
617 + regs->pc = resolver;
618 + return 3;
619 + }
620 + }
621 + } while (0);
622 +#endif
623 +
624 + return 1;
625 +}
626 +
627 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
628 +{
629 + unsigned long i;
630 +
631 + printk(KERN_ERR "PAX: bytes at PC: ");
632 + for (i = 0; i < 5; i++) {
633 + unsigned int c;
634 + if (get_user(c, (unsigned int *)pc+i))
635 + printk(KERN_CONT "???????? ");
636 + else
637 + printk(KERN_CONT "%08x ", c);
638 + }
639 + printk("\n");
640 +}
641 +#endif
642
643 /*
644 * This routine handles page faults. It determines the address,
645 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
646 good_area:
647 si_code = SEGV_ACCERR;
648 if (cause < 0) {
649 - if (!(vma->vm_flags & VM_EXEC))
650 + if (!(vma->vm_flags & VM_EXEC)) {
651 +
652 +#ifdef CONFIG_PAX_PAGEEXEC
653 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
654 + goto bad_area;
655 +
656 + up_read(&mm->mmap_sem);
657 + switch (pax_handle_fetch_fault(regs)) {
658 +
659 +#ifdef CONFIG_PAX_EMUPLT
660 + case 2:
661 + case 3:
662 + return;
663 +#endif
664 +
665 + }
666 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
667 + do_group_exit(SIGKILL);
668 +#else
669 goto bad_area;
670 +#endif
671 +
672 + }
673 } else if (!cause) {
674 /* Allow reads even for write-only mappings */
675 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
676 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
677 index 86976d0..683de93 100644
678 --- a/arch/arm/include/asm/atomic.h
679 +++ b/arch/arm/include/asm/atomic.h
680 @@ -15,6 +15,10 @@
681 #include <linux/types.h>
682 #include <asm/system.h>
683
684 +#ifdef CONFIG_GENERIC_ATOMIC64
685 +#include <asm-generic/atomic64.h>
686 +#endif
687 +
688 #define ATOMIC_INIT(i) { (i) }
689
690 #ifdef __KERNEL__
691 @@ -25,7 +29,15 @@
692 * atomic_set() is the clrex or dummy strex done on every exception return.
693 */
694 #define atomic_read(v) (*(volatile int *)&(v)->counter)
695 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
696 +{
697 + return v->counter;
698 +}
699 #define atomic_set(v,i) (((v)->counter) = (i))
700 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
701 +{
702 + v->counter = i;
703 +}
704
705 #if __LINUX_ARM_ARCH__ >= 6
706
707 @@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
708 int result;
709
710 __asm__ __volatile__("@ atomic_add\n"
711 +"1: ldrex %1, [%3]\n"
712 +" adds %0, %1, %4\n"
713 +
714 +#ifdef CONFIG_PAX_REFCOUNT
715 +" bvc 3f\n"
716 +"2: bkpt 0xf103\n"
717 +"3:\n"
718 +#endif
719 +
720 +" strex %1, %0, [%3]\n"
721 +" teq %1, #0\n"
722 +" bne 1b"
723 +
724 +#ifdef CONFIG_PAX_REFCOUNT
725 +"\n4:\n"
726 + _ASM_EXTABLE(2b, 4b)
727 +#endif
728 +
729 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
730 + : "r" (&v->counter), "Ir" (i)
731 + : "cc");
732 +}
733 +
734 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
735 +{
736 + unsigned long tmp;
737 + int result;
738 +
739 + __asm__ __volatile__("@ atomic_add_unchecked\n"
740 "1: ldrex %0, [%3]\n"
741 " add %0, %0, %4\n"
742 " strex %1, %0, [%3]\n"
743 @@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
744 smp_mb();
745
746 __asm__ __volatile__("@ atomic_add_return\n"
747 +"1: ldrex %1, [%3]\n"
748 +" adds %0, %1, %4\n"
749 +
750 +#ifdef CONFIG_PAX_REFCOUNT
751 +" bvc 3f\n"
752 +" mov %0, %1\n"
753 +"2: bkpt 0xf103\n"
754 +"3:\n"
755 +#endif
756 +
757 +" strex %1, %0, [%3]\n"
758 +" teq %1, #0\n"
759 +" bne 1b"
760 +
761 +#ifdef CONFIG_PAX_REFCOUNT
762 +"\n4:\n"
763 + _ASM_EXTABLE(2b, 4b)
764 +#endif
765 +
766 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
767 + : "r" (&v->counter), "Ir" (i)
768 + : "cc");
769 +
770 + smp_mb();
771 +
772 + return result;
773 +}
774 +
775 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
776 +{
777 + unsigned long tmp;
778 + int result;
779 +
780 + smp_mb();
781 +
782 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
783 "1: ldrex %0, [%3]\n"
784 " add %0, %0, %4\n"
785 " strex %1, %0, [%3]\n"
786 @@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
787 int result;
788
789 __asm__ __volatile__("@ atomic_sub\n"
790 +"1: ldrex %1, [%3]\n"
791 +" subs %0, %1, %4\n"
792 +
793 +#ifdef CONFIG_PAX_REFCOUNT
794 +" bvc 3f\n"
795 +"2: bkpt 0xf103\n"
796 +"3:\n"
797 +#endif
798 +
799 +" strex %1, %0, [%3]\n"
800 +" teq %1, #0\n"
801 +" bne 1b"
802 +
803 +#ifdef CONFIG_PAX_REFCOUNT
804 +"\n4:\n"
805 + _ASM_EXTABLE(2b, 4b)
806 +#endif
807 +
808 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
809 + : "r" (&v->counter), "Ir" (i)
810 + : "cc");
811 +}
812 +
813 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
814 +{
815 + unsigned long tmp;
816 + int result;
817 +
818 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
819 "1: ldrex %0, [%3]\n"
820 " sub %0, %0, %4\n"
821 " strex %1, %0, [%3]\n"
822 @@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
823 smp_mb();
824
825 __asm__ __volatile__("@ atomic_sub_return\n"
826 -"1: ldrex %0, [%3]\n"
827 -" sub %0, %0, %4\n"
828 +"1: ldrex %1, [%3]\n"
829 +" sub %0, %1, %4\n"
830 +
831 +#ifdef CONFIG_PAX_REFCOUNT
832 +" bvc 3f\n"
833 +" mov %0, %1\n"
834 +"2: bkpt 0xf103\n"
835 +"3:\n"
836 +#endif
837 +
838 " strex %1, %0, [%3]\n"
839 " teq %1, #0\n"
840 " bne 1b"
841 +
842 +#ifdef CONFIG_PAX_REFCOUNT
843 +"\n4:\n"
844 + _ASM_EXTABLE(2b, 4b)
845 +#endif
846 +
847 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
848 : "r" (&v->counter), "Ir" (i)
849 : "cc");
850 @@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
851 return oldval;
852 }
853
854 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
855 +{
856 + unsigned long oldval, res;
857 +
858 + smp_mb();
859 +
860 + do {
861 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
862 + "ldrex %1, [%3]\n"
863 + "mov %0, #0\n"
864 + "teq %1, %4\n"
865 + "strexeq %0, %5, [%3]\n"
866 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
867 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
868 + : "cc");
869 + } while (res);
870 +
871 + smp_mb();
872 +
873 + return oldval;
874 +}
875 +
876 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
877 {
878 unsigned long tmp, tmp2;
879 @@ -207,6 +349,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
880 #endif /* __LINUX_ARM_ARCH__ */
881
882 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
883 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
884 +{
885 + return xchg(&v->counter, new);
886 +}
887
888 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
889 {
890 @@ -219,11 +365,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
891 }
892
893 #define atomic_inc(v) atomic_add(1, v)
894 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
895 +{
896 + atomic_add_unchecked(1, v);
897 +}
898 #define atomic_dec(v) atomic_sub(1, v)
899 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
900 +{
901 + atomic_sub_unchecked(1, v);
902 +}
903
904 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
905 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
906 +{
907 + return atomic_add_return_unchecked(1, v) == 0;
908 +}
909 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
910 #define atomic_inc_return(v) (atomic_add_return(1, v))
911 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
912 +{
913 + return atomic_add_return_unchecked(1, v);
914 +}
915 #define atomic_dec_return(v) (atomic_sub_return(1, v))
916 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
917
918 @@ -239,6 +401,14 @@ typedef struct {
919 u64 __aligned(8) counter;
920 } atomic64_t;
921
922 +#ifdef CONFIG_PAX_REFCOUNT
923 +typedef struct {
924 + u64 __aligned(8) counter;
925 +} atomic64_unchecked_t;
926 +#else
927 +typedef atomic64_t atomic64_unchecked_t;
928 +#endif
929 +
930 #define ATOMIC64_INIT(i) { (i) }
931
932 static inline u64 atomic64_read(atomic64_t *v)
933 @@ -254,6 +424,19 @@ static inline u64 atomic64_read(atomic64_t *v)
934 return result;
935 }
936
937 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
938 +{
939 + u64 result;
940 +
941 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
942 +" ldrexd %0, %H0, [%1]"
943 + : "=&r" (result)
944 + : "r" (&v->counter), "Qo" (v->counter)
945 + );
946 +
947 + return result;
948 +}
949 +
950 static inline void atomic64_set(atomic64_t *v, u64 i)
951 {
952 u64 tmp;
953 @@ -268,6 +451,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
954 : "cc");
955 }
956
957 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
958 +{
959 + u64 tmp;
960 +
961 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
962 +"1: ldrexd %0, %H0, [%2]\n"
963 +" strexd %0, %3, %H3, [%2]\n"
964 +" teq %0, #0\n"
965 +" bne 1b"
966 + : "=&r" (tmp), "=Qo" (v->counter)
967 + : "r" (&v->counter), "r" (i)
968 + : "cc");
969 +}
970 +
971 static inline void atomic64_add(u64 i, atomic64_t *v)
972 {
973 u64 result;
974 @@ -276,6 +473,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
975 __asm__ __volatile__("@ atomic64_add\n"
976 "1: ldrexd %0, %H0, [%3]\n"
977 " adds %0, %0, %4\n"
978 +" adcs %H0, %H0, %H4\n"
979 +
980 +#ifdef CONFIG_PAX_REFCOUNT
981 +" bvc 3f\n"
982 +"2: bkpt 0xf103\n"
983 +"3:\n"
984 +#endif
985 +
986 +" strexd %1, %0, %H0, [%3]\n"
987 +" teq %1, #0\n"
988 +" bne 1b"
989 +
990 +#ifdef CONFIG_PAX_REFCOUNT
991 +"\n4:\n"
992 + _ASM_EXTABLE(2b, 4b)
993 +#endif
994 +
995 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
996 + : "r" (&v->counter), "r" (i)
997 + : "cc");
998 +}
999 +
1000 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1001 +{
1002 + u64 result;
1003 + unsigned long tmp;
1004 +
1005 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1006 +"1: ldrexd %0, %H0, [%3]\n"
1007 +" adds %0, %0, %4\n"
1008 " adc %H0, %H0, %H4\n"
1009 " strexd %1, %0, %H0, [%3]\n"
1010 " teq %1, #0\n"
1011 @@ -287,12 +514,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1012
1013 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1014 {
1015 - u64 result;
1016 - unsigned long tmp;
1017 + u64 result, tmp;
1018
1019 smp_mb();
1020
1021 __asm__ __volatile__("@ atomic64_add_return\n"
1022 +"1: ldrexd %1, %H1, [%3]\n"
1023 +" adds %0, %1, %4\n"
1024 +" adcs %H0, %H1, %H4\n"
1025 +
1026 +#ifdef CONFIG_PAX_REFCOUNT
1027 +" bvc 3f\n"
1028 +" mov %0, %1\n"
1029 +" mov %H0, %H1\n"
1030 +"2: bkpt 0xf103\n"
1031 +"3:\n"
1032 +#endif
1033 +
1034 +" strexd %1, %0, %H0, [%3]\n"
1035 +" teq %1, #0\n"
1036 +" bne 1b"
1037 +
1038 +#ifdef CONFIG_PAX_REFCOUNT
1039 +"\n4:\n"
1040 + _ASM_EXTABLE(2b, 4b)
1041 +#endif
1042 +
1043 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1044 + : "r" (&v->counter), "r" (i)
1045 + : "cc");
1046 +
1047 + smp_mb();
1048 +
1049 + return result;
1050 +}
1051 +
1052 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1053 +{
1054 + u64 result;
1055 + unsigned long tmp;
1056 +
1057 + smp_mb();
1058 +
1059 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1060 "1: ldrexd %0, %H0, [%3]\n"
1061 " adds %0, %0, %4\n"
1062 " adc %H0, %H0, %H4\n"
1063 @@ -316,6 +580,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1064 __asm__ __volatile__("@ atomic64_sub\n"
1065 "1: ldrexd %0, %H0, [%3]\n"
1066 " subs %0, %0, %4\n"
1067 +" sbcs %H0, %H0, %H4\n"
1068 +
1069 +#ifdef CONFIG_PAX_REFCOUNT
1070 +" bvc 3f\n"
1071 +"2: bkpt 0xf103\n"
1072 +"3:\n"
1073 +#endif
1074 +
1075 +" strexd %1, %0, %H0, [%3]\n"
1076 +" teq %1, #0\n"
1077 +" bne 1b"
1078 +
1079 +#ifdef CONFIG_PAX_REFCOUNT
1080 +"\n4:\n"
1081 + _ASM_EXTABLE(2b, 4b)
1082 +#endif
1083 +
1084 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1085 + : "r" (&v->counter), "r" (i)
1086 + : "cc");
1087 +}
1088 +
1089 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1090 +{
1091 + u64 result;
1092 + unsigned long tmp;
1093 +
1094 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1095 +"1: ldrexd %0, %H0, [%3]\n"
1096 +" subs %0, %0, %4\n"
1097 " sbc %H0, %H0, %H4\n"
1098 " strexd %1, %0, %H0, [%3]\n"
1099 " teq %1, #0\n"
1100 @@ -327,18 +621,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1101
1102 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1103 {
1104 - u64 result;
1105 - unsigned long tmp;
1106 + u64 result, tmp;
1107
1108 smp_mb();
1109
1110 __asm__ __volatile__("@ atomic64_sub_return\n"
1111 -"1: ldrexd %0, %H0, [%3]\n"
1112 -" subs %0, %0, %4\n"
1113 -" sbc %H0, %H0, %H4\n"
1114 +"1: ldrexd %1, %H1, [%3]\n"
1115 +" subs %0, %1, %4\n"
1116 +" sbc %H0, %H1, %H4\n"
1117 +
1118 +#ifdef CONFIG_PAX_REFCOUNT
1119 +" bvc 3f\n"
1120 +" mov %0, %1\n"
1121 +" mov %H0, %H1\n"
1122 +"2: bkpt 0xf103\n"
1123 +"3:\n"
1124 +#endif
1125 +
1126 " strexd %1, %0, %H0, [%3]\n"
1127 " teq %1, #0\n"
1128 " bne 1b"
1129 +
1130 +#ifdef CONFIG_PAX_REFCOUNT
1131 +"\n4:\n"
1132 + _ASM_EXTABLE(2b, 4b)
1133 +#endif
1134 +
1135 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1136 : "r" (&v->counter), "r" (i)
1137 : "cc");
1138 @@ -372,6 +680,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1139 return oldval;
1140 }
1141
1142 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1143 +{
1144 + u64 oldval;
1145 + unsigned long res;
1146 +
1147 + smp_mb();
1148 +
1149 + do {
1150 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1151 + "ldrexd %1, %H1, [%3]\n"
1152 + "mov %0, #0\n"
1153 + "teq %1, %4\n"
1154 + "teqeq %H1, %H4\n"
1155 + "strexdeq %0, %5, %H5, [%3]"
1156 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1157 + : "r" (&ptr->counter), "r" (old), "r" (new)
1158 + : "cc");
1159 + } while (res);
1160 +
1161 + smp_mb();
1162 +
1163 + return oldval;
1164 +}
1165 +
1166 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1167 {
1168 u64 result;
1169 @@ -395,21 +727,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1170
1171 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1172 {
1173 - u64 result;
1174 - unsigned long tmp;
1175 + u64 result, tmp;
1176
1177 smp_mb();
1178
1179 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1180 -"1: ldrexd %0, %H0, [%3]\n"
1181 -" subs %0, %0, #1\n"
1182 -" sbc %H0, %H0, #0\n"
1183 +"1: ldrexd %1, %H1, [%3]\n"
1184 +" subs %0, %1, #1\n"
1185 +" sbc %H0, %H1, #0\n"
1186 +
1187 +#ifdef CONFIG_PAX_REFCOUNT
1188 +" bvc 3f\n"
1189 +" mov %0, %1\n"
1190 +" mov %H0, %H1\n"
1191 +"2: bkpt 0xf103\n"
1192 +"3:\n"
1193 +#endif
1194 +
1195 " teq %H0, #0\n"
1196 -" bmi 2f\n"
1197 +" bmi 4f\n"
1198 " strexd %1, %0, %H0, [%3]\n"
1199 " teq %1, #0\n"
1200 " bne 1b\n"
1201 -"2:"
1202 +"4:\n"
1203 +
1204 +#ifdef CONFIG_PAX_REFCOUNT
1205 + _ASM_EXTABLE(2b, 4b)
1206 +#endif
1207 +
1208 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1209 : "r" (&v->counter)
1210 : "cc");
1211 @@ -432,13 +777,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1212 " teq %0, %5\n"
1213 " teqeq %H0, %H5\n"
1214 " moveq %1, #0\n"
1215 -" beq 2f\n"
1216 +" beq 4f\n"
1217 " adds %0, %0, %6\n"
1218 " adc %H0, %H0, %H6\n"
1219 +
1220 +#ifdef CONFIG_PAX_REFCOUNT
1221 +" bvc 3f\n"
1222 +"2: bkpt 0xf103\n"
1223 +"3:\n"
1224 +#endif
1225 +
1226 " strexd %2, %0, %H0, [%4]\n"
1227 " teq %2, #0\n"
1228 " bne 1b\n"
1229 -"2:"
1230 +"4:\n"
1231 +
1232 +#ifdef CONFIG_PAX_REFCOUNT
1233 + _ASM_EXTABLE(2b, 4b)
1234 +#endif
1235 +
1236 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1237 : "r" (&v->counter), "r" (u), "r" (a)
1238 : "cc");
1239 @@ -451,10 +808,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1240
1241 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1242 #define atomic64_inc(v) atomic64_add(1LL, (v))
1243 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1244 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1245 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1246 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1247 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1248 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1249 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1250 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1251 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1252 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1253 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1254 index 75fe66b..2255c86 100644
1255 --- a/arch/arm/include/asm/cache.h
1256 +++ b/arch/arm/include/asm/cache.h
1257 @@ -4,8 +4,10 @@
1258 #ifndef __ASMARM_CACHE_H
1259 #define __ASMARM_CACHE_H
1260
1261 +#include <linux/const.h>
1262 +
1263 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1264 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1265 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1266
1267 /*
1268 * Memory returned by kmalloc() may be used for DMA, so we must make
1269 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1270 index d5d8d5c..ad92c96 100644
1271 --- a/arch/arm/include/asm/cacheflush.h
1272 +++ b/arch/arm/include/asm/cacheflush.h
1273 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1274 void (*dma_unmap_area)(const void *, size_t, int);
1275
1276 void (*dma_flush_range)(const void *, const void *);
1277 -};
1278 +} __no_const;
1279
1280 /*
1281 * Select the calling method
1282 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1283 index 0e9ce8d..6ef1e03 100644
1284 --- a/arch/arm/include/asm/elf.h
1285 +++ b/arch/arm/include/asm/elf.h
1286 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1287 the loader. We need to make sure that it is out of the way of the program
1288 that it will "exec", and that there is sufficient room for the brk. */
1289
1290 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1291 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1292 +
1293 +#ifdef CONFIG_PAX_ASLR
1294 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1295 +
1296 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1297 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1298 +#endif
1299
1300 /* When the program starts, a1 contains a pointer to a function to be
1301 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1302 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1303 extern void elf_set_personality(const struct elf32_hdr *);
1304 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1305
1306 -struct mm_struct;
1307 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1308 -#define arch_randomize_brk arch_randomize_brk
1309 -
1310 extern int vectors_user_mapping(void);
1311 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1312 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1313 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1314 index e51b1e8..32a3113 100644
1315 --- a/arch/arm/include/asm/kmap_types.h
1316 +++ b/arch/arm/include/asm/kmap_types.h
1317 @@ -21,6 +21,7 @@ enum km_type {
1318 KM_L1_CACHE,
1319 KM_L2_CACHE,
1320 KM_KDB,
1321 + KM_CLEARPAGE,
1322 KM_TYPE_NR
1323 };
1324
1325 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1326 index 53426c6..c7baff3 100644
1327 --- a/arch/arm/include/asm/outercache.h
1328 +++ b/arch/arm/include/asm/outercache.h
1329 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1330 #endif
1331 void (*set_debug)(unsigned long);
1332 void (*resume)(void);
1333 -};
1334 +} __no_const;
1335
1336 #ifdef CONFIG_OUTER_CACHE
1337
1338 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1339 index ca94653..6ac0d56 100644
1340 --- a/arch/arm/include/asm/page.h
1341 +++ b/arch/arm/include/asm/page.h
1342 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1343 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1344 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1345 unsigned long vaddr, struct vm_area_struct *vma);
1346 -};
1347 +} __no_const;
1348
1349 #ifdef MULTI_USER
1350 extern struct cpu_user_fns cpu_user;
1351 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1352 index 984014b..a6d914f 100644
1353 --- a/arch/arm/include/asm/system.h
1354 +++ b/arch/arm/include/asm/system.h
1355 @@ -90,6 +90,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1356
1357 #define xchg(ptr,x) \
1358 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1359 +#define xchg_unchecked(ptr,x) \
1360 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1361
1362 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1363
1364 @@ -101,7 +103,7 @@ extern int __pure cpu_architecture(void);
1365 extern void cpu_init(void);
1366
1367 void arm_machine_restart(char mode, const char *cmd);
1368 -extern void (*arm_pm_restart)(char str, const char *cmd);
1369 +extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
1370
1371 #define UDBG_UNDEFINED (1 << 0)
1372 #define UDBG_SYSCALL (1 << 1)
1373 @@ -526,6 +528,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1374
1375 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1376
1377 +#define _ASM_EXTABLE(from, to) \
1378 +" .pushsection __ex_table,\"a\"\n"\
1379 +" .align 3\n" \
1380 +" .long " #from ", " #to"\n" \
1381 +" .popsection"
1382 +
1383 +
1384 #endif /* __ASSEMBLY__ */
1385
1386 #define arch_align_stack(x) (x)
1387 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1388 index b293616..96310e5 100644
1389 --- a/arch/arm/include/asm/uaccess.h
1390 +++ b/arch/arm/include/asm/uaccess.h
1391 @@ -22,6 +22,8 @@
1392 #define VERIFY_READ 0
1393 #define VERIFY_WRITE 1
1394
1395 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1396 +
1397 /*
1398 * The exception table consists of pairs of addresses: the first is the
1399 * address of an instruction that is allowed to fault, and the second is
1400 @@ -387,8 +389,23 @@ do { \
1401
1402
1403 #ifdef CONFIG_MMU
1404 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1405 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1406 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1407 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1408 +
1409 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1410 +{
1411 + if (!__builtin_constant_p(n))
1412 + check_object_size(to, n, false);
1413 + return ___copy_from_user(to, from, n);
1414 +}
1415 +
1416 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1417 +{
1418 + if (!__builtin_constant_p(n))
1419 + check_object_size(from, n, true);
1420 + return ___copy_to_user(to, from, n);
1421 +}
1422 +
1423 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1424 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1425 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1426 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1427
1428 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1429 {
1430 + if ((long)n < 0)
1431 + return n;
1432 +
1433 if (access_ok(VERIFY_READ, from, n))
1434 n = __copy_from_user(to, from, n);
1435 else /* security hole - plug it */
1436 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1437
1438 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1439 {
1440 + if ((long)n < 0)
1441 + return n;
1442 +
1443 if (access_ok(VERIFY_WRITE, to, n))
1444 n = __copy_to_user(to, from, n);
1445 return n;
1446 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1447 index 5b0bce6..becd81c 100644
1448 --- a/arch/arm/kernel/armksyms.c
1449 +++ b/arch/arm/kernel/armksyms.c
1450 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1451 #ifdef CONFIG_MMU
1452 EXPORT_SYMBOL(copy_page);
1453
1454 -EXPORT_SYMBOL(__copy_from_user);
1455 -EXPORT_SYMBOL(__copy_to_user);
1456 +EXPORT_SYMBOL(___copy_from_user);
1457 +EXPORT_SYMBOL(___copy_to_user);
1458 EXPORT_SYMBOL(__clear_user);
1459
1460 EXPORT_SYMBOL(__get_user_1);
1461 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1462 index 3d0c6fb..9d326fa 100644
1463 --- a/arch/arm/kernel/process.c
1464 +++ b/arch/arm/kernel/process.c
1465 @@ -28,7 +28,6 @@
1466 #include <linux/tick.h>
1467 #include <linux/utsname.h>
1468 #include <linux/uaccess.h>
1469 -#include <linux/random.h>
1470 #include <linux/hw_breakpoint.h>
1471 #include <linux/cpuidle.h>
1472
1473 @@ -92,7 +91,7 @@ static int __init hlt_setup(char *__unused)
1474 __setup("nohlt", nohlt_setup);
1475 __setup("hlt", hlt_setup);
1476
1477 -void arm_machine_restart(char mode, const char *cmd)
1478 +__noreturn void arm_machine_restart(char mode, const char *cmd)
1479 {
1480 /* Disable interrupts first */
1481 local_irq_disable();
1482 @@ -134,7 +133,7 @@ void arm_machine_restart(char mode, const char *cmd)
1483 void (*pm_power_off)(void);
1484 EXPORT_SYMBOL(pm_power_off);
1485
1486 -void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
1487 +void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
1488 EXPORT_SYMBOL_GPL(arm_pm_restart);
1489
1490 static void do_nothing(void *unused)
1491 @@ -248,6 +247,7 @@ void machine_power_off(void)
1492 machine_shutdown();
1493 if (pm_power_off)
1494 pm_power_off();
1495 + BUG();
1496 }
1497
1498 void machine_restart(char *cmd)
1499 @@ -484,12 +484,6 @@ unsigned long get_wchan(struct task_struct *p)
1500 return 0;
1501 }
1502
1503 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1504 -{
1505 - unsigned long range_end = mm->brk + 0x02000000;
1506 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1507 -}
1508 -
1509 #ifdef CONFIG_MMU
1510 /*
1511 * The vectors page is always readable from user space for the
1512 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1513 index 8fc2c8f..064c150 100644
1514 --- a/arch/arm/kernel/setup.c
1515 +++ b/arch/arm/kernel/setup.c
1516 @@ -108,13 +108,13 @@ struct processor processor __read_mostly;
1517 struct cpu_tlb_fns cpu_tlb __read_mostly;
1518 #endif
1519 #ifdef MULTI_USER
1520 -struct cpu_user_fns cpu_user __read_mostly;
1521 +struct cpu_user_fns cpu_user __read_only;
1522 #endif
1523 #ifdef MULTI_CACHE
1524 -struct cpu_cache_fns cpu_cache __read_mostly;
1525 +struct cpu_cache_fns cpu_cache __read_only;
1526 #endif
1527 #ifdef CONFIG_OUTER_CACHE
1528 -struct outer_cache_fns outer_cache __read_mostly;
1529 +struct outer_cache_fns outer_cache __read_only;
1530 EXPORT_SYMBOL(outer_cache);
1531 #endif
1532
1533 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1534 index 99a5727..a3d5bb1 100644
1535 --- a/arch/arm/kernel/traps.c
1536 +++ b/arch/arm/kernel/traps.c
1537 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1538
1539 static DEFINE_RAW_SPINLOCK(die_lock);
1540
1541 +extern void gr_handle_kernel_exploit(void);
1542 +
1543 /*
1544 * This function is protected against re-entrancy.
1545 */
1546 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1547 panic("Fatal exception in interrupt");
1548 if (panic_on_oops)
1549 panic("Fatal exception");
1550 +
1551 + gr_handle_kernel_exploit();
1552 +
1553 if (ret != NOTIFY_STOP)
1554 do_exit(SIGSEGV);
1555 }
1556 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1557 index 66a477a..bee61d3 100644
1558 --- a/arch/arm/lib/copy_from_user.S
1559 +++ b/arch/arm/lib/copy_from_user.S
1560 @@ -16,7 +16,7 @@
1561 /*
1562 * Prototype:
1563 *
1564 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1565 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1566 *
1567 * Purpose:
1568 *
1569 @@ -84,11 +84,11 @@
1570
1571 .text
1572
1573 -ENTRY(__copy_from_user)
1574 +ENTRY(___copy_from_user)
1575
1576 #include "copy_template.S"
1577
1578 -ENDPROC(__copy_from_user)
1579 +ENDPROC(___copy_from_user)
1580
1581 .pushsection .fixup,"ax"
1582 .align 0
1583 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1584 index 6ee2f67..d1cce76 100644
1585 --- a/arch/arm/lib/copy_page.S
1586 +++ b/arch/arm/lib/copy_page.S
1587 @@ -10,6 +10,7 @@
1588 * ASM optimised string functions
1589 */
1590 #include <linux/linkage.h>
1591 +#include <linux/const.h>
1592 #include <asm/assembler.h>
1593 #include <asm/asm-offsets.h>
1594 #include <asm/cache.h>
1595 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1596 index d066df6..df28194 100644
1597 --- a/arch/arm/lib/copy_to_user.S
1598 +++ b/arch/arm/lib/copy_to_user.S
1599 @@ -16,7 +16,7 @@
1600 /*
1601 * Prototype:
1602 *
1603 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1604 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1605 *
1606 * Purpose:
1607 *
1608 @@ -88,11 +88,11 @@
1609 .text
1610
1611 ENTRY(__copy_to_user_std)
1612 -WEAK(__copy_to_user)
1613 +WEAK(___copy_to_user)
1614
1615 #include "copy_template.S"
1616
1617 -ENDPROC(__copy_to_user)
1618 +ENDPROC(___copy_to_user)
1619 ENDPROC(__copy_to_user_std)
1620
1621 .pushsection .fixup,"ax"
1622 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1623 index d0ece2a..5ae2f39 100644
1624 --- a/arch/arm/lib/uaccess.S
1625 +++ b/arch/arm/lib/uaccess.S
1626 @@ -20,7 +20,7 @@
1627
1628 #define PAGE_SHIFT 12
1629
1630 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1631 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1632 * Purpose : copy a block to user memory from kernel memory
1633 * Params : to - user memory
1634 * : from - kernel memory
1635 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
1636 sub r2, r2, ip
1637 b .Lc2u_dest_aligned
1638
1639 -ENTRY(__copy_to_user)
1640 +ENTRY(___copy_to_user)
1641 stmfd sp!, {r2, r4 - r7, lr}
1642 cmp r2, #4
1643 blt .Lc2u_not_enough
1644 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
1645 ldrgtb r3, [r1], #0
1646 USER( T(strgtb) r3, [r0], #1) @ May fault
1647 b .Lc2u_finished
1648 -ENDPROC(__copy_to_user)
1649 +ENDPROC(___copy_to_user)
1650
1651 .pushsection .fixup,"ax"
1652 .align 0
1653 9001: ldmfd sp!, {r0, r4 - r7, pc}
1654 .popsection
1655
1656 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1657 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1658 * Purpose : copy a block from user memory to kernel memory
1659 * Params : to - kernel memory
1660 * : from - user memory
1661 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
1662 sub r2, r2, ip
1663 b .Lcfu_dest_aligned
1664
1665 -ENTRY(__copy_from_user)
1666 +ENTRY(___copy_from_user)
1667 stmfd sp!, {r0, r2, r4 - r7, lr}
1668 cmp r2, #4
1669 blt .Lcfu_not_enough
1670 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
1671 USER( T(ldrgtb) r3, [r1], #1) @ May fault
1672 strgtb r3, [r0], #1
1673 b .Lcfu_finished
1674 -ENDPROC(__copy_from_user)
1675 +ENDPROC(___copy_from_user)
1676
1677 .pushsection .fixup,"ax"
1678 .align 0
1679 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1680 index 025f742..8432b08 100644
1681 --- a/arch/arm/lib/uaccess_with_memcpy.c
1682 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1683 @@ -104,7 +104,7 @@ out:
1684 }
1685
1686 unsigned long
1687 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1688 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1689 {
1690 /*
1691 * This test is stubbed out of the main function above to keep
1692 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1693 index 2b2d51c..0127490 100644
1694 --- a/arch/arm/mach-ux500/mbox-db5500.c
1695 +++ b/arch/arm/mach-ux500/mbox-db5500.c
1696 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1697 return sprintf(buf, "0x%X\n", mbox_value);
1698 }
1699
1700 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1701 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1702
1703 static int mbox_show(struct seq_file *s, void *data)
1704 {
1705 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1706 index aa33949..d366075 100644
1707 --- a/arch/arm/mm/fault.c
1708 +++ b/arch/arm/mm/fault.c
1709 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1710 }
1711 #endif
1712
1713 +#ifdef CONFIG_PAX_PAGEEXEC
1714 + if (fsr & FSR_LNX_PF) {
1715 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1716 + do_group_exit(SIGKILL);
1717 + }
1718 +#endif
1719 +
1720 tsk->thread.address = addr;
1721 tsk->thread.error_code = fsr;
1722 tsk->thread.trap_no = 14;
1723 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1724 }
1725 #endif /* CONFIG_MMU */
1726
1727 +#ifdef CONFIG_PAX_PAGEEXEC
1728 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1729 +{
1730 + long i;
1731 +
1732 + printk(KERN_ERR "PAX: bytes at PC: ");
1733 + for (i = 0; i < 20; i++) {
1734 + unsigned char c;
1735 + if (get_user(c, (__force unsigned char __user *)pc+i))
1736 + printk(KERN_CONT "?? ");
1737 + else
1738 + printk(KERN_CONT "%02x ", c);
1739 + }
1740 + printk("\n");
1741 +
1742 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1743 + for (i = -1; i < 20; i++) {
1744 + unsigned long c;
1745 + if (get_user(c, (__force unsigned long __user *)sp+i))
1746 + printk(KERN_CONT "???????? ");
1747 + else
1748 + printk(KERN_CONT "%08lx ", c);
1749 + }
1750 + printk("\n");
1751 +}
1752 +#endif
1753 +
1754 /*
1755 * First Level Translation Fault Handler
1756 *
1757 @@ -628,6 +662,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1758 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1759 struct siginfo info;
1760
1761 +#ifdef CONFIG_PAX_REFCOUNT
1762 + if (fsr_fs(ifsr) == 2) {
1763 + unsigned int bkpt;
1764 +
1765 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1766 + current->thread.error_code = ifsr;
1767 + current->thread.trap_no = 0;
1768 + pax_report_refcount_overflow(regs);
1769 + fixup_exception(regs);
1770 + return;
1771 + }
1772 + }
1773 +#endif
1774 +
1775 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1776 return;
1777
1778 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1779 index 44b628e..623ee2a 100644
1780 --- a/arch/arm/mm/mmap.c
1781 +++ b/arch/arm/mm/mmap.c
1782 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1783 if (len > TASK_SIZE)
1784 return -ENOMEM;
1785
1786 +#ifdef CONFIG_PAX_RANDMMAP
1787 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1788 +#endif
1789 +
1790 if (addr) {
1791 if (do_align)
1792 addr = COLOUR_ALIGN(addr, pgoff);
1793 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1794 addr = PAGE_ALIGN(addr);
1795
1796 vma = find_vma(mm, addr);
1797 - if (TASK_SIZE - len >= addr &&
1798 - (!vma || addr + len <= vma->vm_start))
1799 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1800 return addr;
1801 }
1802 if (len > mm->cached_hole_size) {
1803 - start_addr = addr = mm->free_area_cache;
1804 + start_addr = addr = mm->free_area_cache;
1805 } else {
1806 - start_addr = addr = TASK_UNMAPPED_BASE;
1807 - mm->cached_hole_size = 0;
1808 + start_addr = addr = mm->mmap_base;
1809 + mm->cached_hole_size = 0;
1810 }
1811 /* 8 bits of randomness in 20 address space bits */
1812 if ((current->flags & PF_RANDOMIZE) &&
1813 @@ -89,14 +92,14 @@ full_search:
1814 * Start a new search - just in case we missed
1815 * some holes.
1816 */
1817 - if (start_addr != TASK_UNMAPPED_BASE) {
1818 - start_addr = addr = TASK_UNMAPPED_BASE;
1819 + if (start_addr != mm->mmap_base) {
1820 + start_addr = addr = mm->mmap_base;
1821 mm->cached_hole_size = 0;
1822 goto full_search;
1823 }
1824 return -ENOMEM;
1825 }
1826 - if (!vma || addr + len <= vma->vm_start) {
1827 + if (check_heap_stack_gap(vma, addr, len)) {
1828 /*
1829 * Remember the place where we stopped the search:
1830 */
1831 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1832 index 4c1a363..df311d0 100644
1833 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1834 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1835 @@ -41,7 +41,7 @@ struct samsung_dma_ops {
1836 int (*started)(unsigned ch);
1837 int (*flush)(unsigned ch);
1838 int (*stop)(unsigned ch);
1839 -};
1840 +} __no_const;
1841
1842 extern void *samsung_dmadev_get_ops(void);
1843 extern void *s3c_dma_get_ops(void);
1844 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1845 index 5f28cae..3d23723 100644
1846 --- a/arch/arm/plat-samsung/include/plat/ehci.h
1847 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
1848 @@ -14,7 +14,7 @@
1849 struct s5p_ehci_platdata {
1850 int (*phy_init)(struct platform_device *pdev, int type);
1851 int (*phy_exit)(struct platform_device *pdev, int type);
1852 -};
1853 +} __no_const;
1854
1855 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
1856
1857 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1858 index c3a58a1..78fbf54 100644
1859 --- a/arch/avr32/include/asm/cache.h
1860 +++ b/arch/avr32/include/asm/cache.h
1861 @@ -1,8 +1,10 @@
1862 #ifndef __ASM_AVR32_CACHE_H
1863 #define __ASM_AVR32_CACHE_H
1864
1865 +#include <linux/const.h>
1866 +
1867 #define L1_CACHE_SHIFT 5
1868 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1869 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1870
1871 /*
1872 * Memory returned by kmalloc() may be used for DMA, so we must make
1873 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1874 index 3b3159b..425ea94 100644
1875 --- a/arch/avr32/include/asm/elf.h
1876 +++ b/arch/avr32/include/asm/elf.h
1877 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1878 the loader. We need to make sure that it is out of the way of the program
1879 that it will "exec", and that there is sufficient room for the brk. */
1880
1881 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1882 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1883
1884 +#ifdef CONFIG_PAX_ASLR
1885 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1886 +
1887 +#define PAX_DELTA_MMAP_LEN 15
1888 +#define PAX_DELTA_STACK_LEN 15
1889 +#endif
1890
1891 /* This yields a mask that user programs can use to figure out what
1892 instruction set this CPU supports. This could be done in user space,
1893 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1894 index b7f5c68..556135c 100644
1895 --- a/arch/avr32/include/asm/kmap_types.h
1896 +++ b/arch/avr32/include/asm/kmap_types.h
1897 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1898 D(11) KM_IRQ1,
1899 D(12) KM_SOFTIRQ0,
1900 D(13) KM_SOFTIRQ1,
1901 -D(14) KM_TYPE_NR
1902 +D(14) KM_CLEARPAGE,
1903 +D(15) KM_TYPE_NR
1904 };
1905
1906 #undef D
1907 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1908 index f7040a1..db9f300 100644
1909 --- a/arch/avr32/mm/fault.c
1910 +++ b/arch/avr32/mm/fault.c
1911 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1912
1913 int exception_trace = 1;
1914
1915 +#ifdef CONFIG_PAX_PAGEEXEC
1916 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1917 +{
1918 + unsigned long i;
1919 +
1920 + printk(KERN_ERR "PAX: bytes at PC: ");
1921 + for (i = 0; i < 20; i++) {
1922 + unsigned char c;
1923 + if (get_user(c, (unsigned char *)pc+i))
1924 + printk(KERN_CONT "???????? ");
1925 + else
1926 + printk(KERN_CONT "%02x ", c);
1927 + }
1928 + printk("\n");
1929 +}
1930 +#endif
1931 +
1932 /*
1933 * This routine handles page faults. It determines the address and the
1934 * problem, and then passes it off to one of the appropriate routines.
1935 @@ -156,6 +173,16 @@ bad_area:
1936 up_read(&mm->mmap_sem);
1937
1938 if (user_mode(regs)) {
1939 +
1940 +#ifdef CONFIG_PAX_PAGEEXEC
1941 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1942 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1943 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1944 + do_group_exit(SIGKILL);
1945 + }
1946 + }
1947 +#endif
1948 +
1949 if (exception_trace && printk_ratelimit())
1950 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1951 "sp %08lx ecr %lu\n",
1952 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1953 index 568885a..f8008df 100644
1954 --- a/arch/blackfin/include/asm/cache.h
1955 +++ b/arch/blackfin/include/asm/cache.h
1956 @@ -7,6 +7,7 @@
1957 #ifndef __ARCH_BLACKFIN_CACHE_H
1958 #define __ARCH_BLACKFIN_CACHE_H
1959
1960 +#include <linux/const.h>
1961 #include <linux/linkage.h> /* for asmlinkage */
1962
1963 /*
1964 @@ -14,7 +15,7 @@
1965 * Blackfin loads 32 bytes for cache
1966 */
1967 #define L1_CACHE_SHIFT 5
1968 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1969 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1970 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1971
1972 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
1973 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
1974 index aea2718..3639a60 100644
1975 --- a/arch/cris/include/arch-v10/arch/cache.h
1976 +++ b/arch/cris/include/arch-v10/arch/cache.h
1977 @@ -1,8 +1,9 @@
1978 #ifndef _ASM_ARCH_CACHE_H
1979 #define _ASM_ARCH_CACHE_H
1980
1981 +#include <linux/const.h>
1982 /* Etrax 100LX have 32-byte cache-lines. */
1983 -#define L1_CACHE_BYTES 32
1984 #define L1_CACHE_SHIFT 5
1985 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1986
1987 #endif /* _ASM_ARCH_CACHE_H */
1988 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
1989 index 1de779f..336fad3 100644
1990 --- a/arch/cris/include/arch-v32/arch/cache.h
1991 +++ b/arch/cris/include/arch-v32/arch/cache.h
1992 @@ -1,11 +1,12 @@
1993 #ifndef _ASM_CRIS_ARCH_CACHE_H
1994 #define _ASM_CRIS_ARCH_CACHE_H
1995
1996 +#include <linux/const.h>
1997 #include <arch/hwregs/dma.h>
1998
1999 /* A cache-line is 32 bytes. */
2000 -#define L1_CACHE_BYTES 32
2001 #define L1_CACHE_SHIFT 5
2002 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2003
2004 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2005
2006 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2007 index 0d8a7d6..d0c9ff5 100644
2008 --- a/arch/frv/include/asm/atomic.h
2009 +++ b/arch/frv/include/asm/atomic.h
2010 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2011 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2012 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2013
2014 +#define atomic64_read_unchecked(v) atomic64_read(v)
2015 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2016 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2017 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2018 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2019 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2020 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2021 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2022 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2023 +
2024 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2025 {
2026 int c, old;
2027 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2028 index 2797163..c2a401d 100644
2029 --- a/arch/frv/include/asm/cache.h
2030 +++ b/arch/frv/include/asm/cache.h
2031 @@ -12,10 +12,11 @@
2032 #ifndef __ASM_CACHE_H
2033 #define __ASM_CACHE_H
2034
2035 +#include <linux/const.h>
2036
2037 /* bytes per L1 cache line */
2038 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2039 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2040 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2041
2042 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2043 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2044 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2045 index f8e16b2..c73ff79 100644
2046 --- a/arch/frv/include/asm/kmap_types.h
2047 +++ b/arch/frv/include/asm/kmap_types.h
2048 @@ -23,6 +23,7 @@ enum km_type {
2049 KM_IRQ1,
2050 KM_SOFTIRQ0,
2051 KM_SOFTIRQ1,
2052 + KM_CLEARPAGE,
2053 KM_TYPE_NR
2054 };
2055
2056 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2057 index 385fd30..6c3d97e 100644
2058 --- a/arch/frv/mm/elf-fdpic.c
2059 +++ b/arch/frv/mm/elf-fdpic.c
2060 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2061 if (addr) {
2062 addr = PAGE_ALIGN(addr);
2063 vma = find_vma(current->mm, addr);
2064 - if (TASK_SIZE - len >= addr &&
2065 - (!vma || addr + len <= vma->vm_start))
2066 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2067 goto success;
2068 }
2069
2070 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2071 for (; vma; vma = vma->vm_next) {
2072 if (addr > limit)
2073 break;
2074 - if (addr + len <= vma->vm_start)
2075 + if (check_heap_stack_gap(vma, addr, len))
2076 goto success;
2077 addr = vma->vm_end;
2078 }
2079 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2080 for (; vma; vma = vma->vm_next) {
2081 if (addr > limit)
2082 break;
2083 - if (addr + len <= vma->vm_start)
2084 + if (check_heap_stack_gap(vma, addr, len))
2085 goto success;
2086 addr = vma->vm_end;
2087 }
2088 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2089 index c635028..6d9445a 100644
2090 --- a/arch/h8300/include/asm/cache.h
2091 +++ b/arch/h8300/include/asm/cache.h
2092 @@ -1,8 +1,10 @@
2093 #ifndef __ARCH_H8300_CACHE_H
2094 #define __ARCH_H8300_CACHE_H
2095
2096 +#include <linux/const.h>
2097 +
2098 /* bytes per L1 cache line */
2099 -#define L1_CACHE_BYTES 4
2100 +#define L1_CACHE_BYTES _AC(4,UL)
2101
2102 /* m68k-elf-gcc 2.95.2 doesn't like these */
2103
2104 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2105 index 0f01de2..d37d309 100644
2106 --- a/arch/hexagon/include/asm/cache.h
2107 +++ b/arch/hexagon/include/asm/cache.h
2108 @@ -21,9 +21,11 @@
2109 #ifndef __ASM_CACHE_H
2110 #define __ASM_CACHE_H
2111
2112 +#include <linux/const.h>
2113 +
2114 /* Bytes per L1 cache line */
2115 -#define L1_CACHE_SHIFT (5)
2116 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2117 +#define L1_CACHE_SHIFT 5
2118 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2119
2120 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2121 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2122 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2123 index 3fad89e..3047da5 100644
2124 --- a/arch/ia64/include/asm/atomic.h
2125 +++ b/arch/ia64/include/asm/atomic.h
2126 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2127 #define atomic64_inc(v) atomic64_add(1, (v))
2128 #define atomic64_dec(v) atomic64_sub(1, (v))
2129
2130 +#define atomic64_read_unchecked(v) atomic64_read(v)
2131 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2132 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2133 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2134 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2135 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2136 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2137 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2138 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2139 +
2140 /* Atomic operations are already serializing */
2141 #define smp_mb__before_atomic_dec() barrier()
2142 #define smp_mb__after_atomic_dec() barrier()
2143 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2144 index 988254a..e1ee885 100644
2145 --- a/arch/ia64/include/asm/cache.h
2146 +++ b/arch/ia64/include/asm/cache.h
2147 @@ -1,6 +1,7 @@
2148 #ifndef _ASM_IA64_CACHE_H
2149 #define _ASM_IA64_CACHE_H
2150
2151 +#include <linux/const.h>
2152
2153 /*
2154 * Copyright (C) 1998-2000 Hewlett-Packard Co
2155 @@ -9,7 +10,7 @@
2156
2157 /* Bytes per L1 (data) cache line. */
2158 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2159 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2160 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2161
2162 #ifdef CONFIG_SMP
2163 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2164 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2165 index b5298eb..67c6e62 100644
2166 --- a/arch/ia64/include/asm/elf.h
2167 +++ b/arch/ia64/include/asm/elf.h
2168 @@ -42,6 +42,13 @@
2169 */
2170 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2171
2172 +#ifdef CONFIG_PAX_ASLR
2173 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2174 +
2175 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2176 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2177 +#endif
2178 +
2179 #define PT_IA_64_UNWIND 0x70000001
2180
2181 /* IA-64 relocations: */
2182 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2183 index 1a97af3..7529d31 100644
2184 --- a/arch/ia64/include/asm/pgtable.h
2185 +++ b/arch/ia64/include/asm/pgtable.h
2186 @@ -12,7 +12,7 @@
2187 * David Mosberger-Tang <davidm@hpl.hp.com>
2188 */
2189
2190 -
2191 +#include <linux/const.h>
2192 #include <asm/mman.h>
2193 #include <asm/page.h>
2194 #include <asm/processor.h>
2195 @@ -143,6 +143,17 @@
2196 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2197 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2198 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2199 +
2200 +#ifdef CONFIG_PAX_PAGEEXEC
2201 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2202 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2203 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2204 +#else
2205 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2206 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2207 +# define PAGE_COPY_NOEXEC PAGE_COPY
2208 +#endif
2209 +
2210 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2211 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2212 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2213 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2214 index b77768d..e0795eb 100644
2215 --- a/arch/ia64/include/asm/spinlock.h
2216 +++ b/arch/ia64/include/asm/spinlock.h
2217 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2218 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2219
2220 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2221 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2222 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2223 }
2224
2225 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2226 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2227 index 449c8c0..432a3d2 100644
2228 --- a/arch/ia64/include/asm/uaccess.h
2229 +++ b/arch/ia64/include/asm/uaccess.h
2230 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2231 const void *__cu_from = (from); \
2232 long __cu_len = (n); \
2233 \
2234 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2235 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2236 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2237 __cu_len; \
2238 })
2239 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2240 long __cu_len = (n); \
2241 \
2242 __chk_user_ptr(__cu_from); \
2243 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2244 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2245 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2246 __cu_len; \
2247 })
2248 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2249 index 24603be..948052d 100644
2250 --- a/arch/ia64/kernel/module.c
2251 +++ b/arch/ia64/kernel/module.c
2252 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2253 void
2254 module_free (struct module *mod, void *module_region)
2255 {
2256 - if (mod && mod->arch.init_unw_table &&
2257 - module_region == mod->module_init) {
2258 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2259 unw_remove_unwind_table(mod->arch.init_unw_table);
2260 mod->arch.init_unw_table = NULL;
2261 }
2262 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2263 }
2264
2265 static inline int
2266 +in_init_rx (const struct module *mod, uint64_t addr)
2267 +{
2268 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2269 +}
2270 +
2271 +static inline int
2272 +in_init_rw (const struct module *mod, uint64_t addr)
2273 +{
2274 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2275 +}
2276 +
2277 +static inline int
2278 in_init (const struct module *mod, uint64_t addr)
2279 {
2280 - return addr - (uint64_t) mod->module_init < mod->init_size;
2281 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2282 +}
2283 +
2284 +static inline int
2285 +in_core_rx (const struct module *mod, uint64_t addr)
2286 +{
2287 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2288 +}
2289 +
2290 +static inline int
2291 +in_core_rw (const struct module *mod, uint64_t addr)
2292 +{
2293 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2294 }
2295
2296 static inline int
2297 in_core (const struct module *mod, uint64_t addr)
2298 {
2299 - return addr - (uint64_t) mod->module_core < mod->core_size;
2300 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2301 }
2302
2303 static inline int
2304 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2305 break;
2306
2307 case RV_BDREL:
2308 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2309 + if (in_init_rx(mod, val))
2310 + val -= (uint64_t) mod->module_init_rx;
2311 + else if (in_init_rw(mod, val))
2312 + val -= (uint64_t) mod->module_init_rw;
2313 + else if (in_core_rx(mod, val))
2314 + val -= (uint64_t) mod->module_core_rx;
2315 + else if (in_core_rw(mod, val))
2316 + val -= (uint64_t) mod->module_core_rw;
2317 break;
2318
2319 case RV_LTV:
2320 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2321 * addresses have been selected...
2322 */
2323 uint64_t gp;
2324 - if (mod->core_size > MAX_LTOFF)
2325 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2326 /*
2327 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2328 * at the end of the module.
2329 */
2330 - gp = mod->core_size - MAX_LTOFF / 2;
2331 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2332 else
2333 - gp = mod->core_size / 2;
2334 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2335 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2336 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2337 mod->arch.gp = gp;
2338 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2339 }
2340 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2341 index 609d500..7dde2a8 100644
2342 --- a/arch/ia64/kernel/sys_ia64.c
2343 +++ b/arch/ia64/kernel/sys_ia64.c
2344 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2345 if (REGION_NUMBER(addr) == RGN_HPAGE)
2346 addr = 0;
2347 #endif
2348 +
2349 +#ifdef CONFIG_PAX_RANDMMAP
2350 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2351 + addr = mm->free_area_cache;
2352 + else
2353 +#endif
2354 +
2355 if (!addr)
2356 addr = mm->free_area_cache;
2357
2358 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2359 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2360 /* At this point: (!vma || addr < vma->vm_end). */
2361 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2362 - if (start_addr != TASK_UNMAPPED_BASE) {
2363 + if (start_addr != mm->mmap_base) {
2364 /* Start a new search --- just in case we missed some holes. */
2365 - addr = TASK_UNMAPPED_BASE;
2366 + addr = mm->mmap_base;
2367 goto full_search;
2368 }
2369 return -ENOMEM;
2370 }
2371 - if (!vma || addr + len <= vma->vm_start) {
2372 + if (check_heap_stack_gap(vma, addr, len)) {
2373 /* Remember the address where we stopped this search: */
2374 mm->free_area_cache = addr + len;
2375 return addr;
2376 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2377 index 53c0ba0..2accdde 100644
2378 --- a/arch/ia64/kernel/vmlinux.lds.S
2379 +++ b/arch/ia64/kernel/vmlinux.lds.S
2380 @@ -199,7 +199,7 @@ SECTIONS {
2381 /* Per-cpu data: */
2382 . = ALIGN(PERCPU_PAGE_SIZE);
2383 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2384 - __phys_per_cpu_start = __per_cpu_load;
2385 + __phys_per_cpu_start = per_cpu_load;
2386 /*
2387 * ensure percpu data fits
2388 * into percpu page size
2389 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2390 index 20b3593..1ce77f0 100644
2391 --- a/arch/ia64/mm/fault.c
2392 +++ b/arch/ia64/mm/fault.c
2393 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2394 return pte_present(pte);
2395 }
2396
2397 +#ifdef CONFIG_PAX_PAGEEXEC
2398 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2399 +{
2400 + unsigned long i;
2401 +
2402 + printk(KERN_ERR "PAX: bytes at PC: ");
2403 + for (i = 0; i < 8; i++) {
2404 + unsigned int c;
2405 + if (get_user(c, (unsigned int *)pc+i))
2406 + printk(KERN_CONT "???????? ");
2407 + else
2408 + printk(KERN_CONT "%08x ", c);
2409 + }
2410 + printk("\n");
2411 +}
2412 +#endif
2413 +
2414 void __kprobes
2415 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2416 {
2417 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2418 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2419 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2420
2421 - if ((vma->vm_flags & mask) != mask)
2422 + if ((vma->vm_flags & mask) != mask) {
2423 +
2424 +#ifdef CONFIG_PAX_PAGEEXEC
2425 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2426 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2427 + goto bad_area;
2428 +
2429 + up_read(&mm->mmap_sem);
2430 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2431 + do_group_exit(SIGKILL);
2432 + }
2433 +#endif
2434 +
2435 goto bad_area;
2436
2437 + }
2438 +
2439 /*
2440 * If for any reason at all we couldn't handle the fault, make
2441 * sure we exit gracefully rather than endlessly redo the
2442 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2443 index 5ca674b..e0e1b70 100644
2444 --- a/arch/ia64/mm/hugetlbpage.c
2445 +++ b/arch/ia64/mm/hugetlbpage.c
2446 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2447 /* At this point: (!vmm || addr < vmm->vm_end). */
2448 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2449 return -ENOMEM;
2450 - if (!vmm || (addr + len) <= vmm->vm_start)
2451 + if (check_heap_stack_gap(vmm, addr, len))
2452 return addr;
2453 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2454 }
2455 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2456 index 00cb0e2..2ad8024 100644
2457 --- a/arch/ia64/mm/init.c
2458 +++ b/arch/ia64/mm/init.c
2459 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2460 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2461 vma->vm_end = vma->vm_start + PAGE_SIZE;
2462 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2463 +
2464 +#ifdef CONFIG_PAX_PAGEEXEC
2465 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2466 + vma->vm_flags &= ~VM_EXEC;
2467 +
2468 +#ifdef CONFIG_PAX_MPROTECT
2469 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2470 + vma->vm_flags &= ~VM_MAYEXEC;
2471 +#endif
2472 +
2473 + }
2474 +#endif
2475 +
2476 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2477 down_write(&current->mm->mmap_sem);
2478 if (insert_vm_struct(current->mm, vma)) {
2479 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2480 index 40b3ee9..8c2c112 100644
2481 --- a/arch/m32r/include/asm/cache.h
2482 +++ b/arch/m32r/include/asm/cache.h
2483 @@ -1,8 +1,10 @@
2484 #ifndef _ASM_M32R_CACHE_H
2485 #define _ASM_M32R_CACHE_H
2486
2487 +#include <linux/const.h>
2488 +
2489 /* L1 cache line size */
2490 #define L1_CACHE_SHIFT 4
2491 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2492 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2493
2494 #endif /* _ASM_M32R_CACHE_H */
2495 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2496 index 82abd15..d95ae5d 100644
2497 --- a/arch/m32r/lib/usercopy.c
2498 +++ b/arch/m32r/lib/usercopy.c
2499 @@ -14,6 +14,9 @@
2500 unsigned long
2501 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2502 {
2503 + if ((long)n < 0)
2504 + return n;
2505 +
2506 prefetch(from);
2507 if (access_ok(VERIFY_WRITE, to, n))
2508 __copy_user(to,from,n);
2509 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2510 unsigned long
2511 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2512 {
2513 + if ((long)n < 0)
2514 + return n;
2515 +
2516 prefetchw(to);
2517 if (access_ok(VERIFY_READ, from, n))
2518 __copy_user_zeroing(to,from,n);
2519 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2520 index 0395c51..5f26031 100644
2521 --- a/arch/m68k/include/asm/cache.h
2522 +++ b/arch/m68k/include/asm/cache.h
2523 @@ -4,9 +4,11 @@
2524 #ifndef __ARCH_M68K_CACHE_H
2525 #define __ARCH_M68K_CACHE_H
2526
2527 +#include <linux/const.h>
2528 +
2529 /* bytes per L1 cache line */
2530 #define L1_CACHE_SHIFT 4
2531 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2532 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2533
2534 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2535
2536 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2537 index 4efe96a..60e8699 100644
2538 --- a/arch/microblaze/include/asm/cache.h
2539 +++ b/arch/microblaze/include/asm/cache.h
2540 @@ -13,11 +13,12 @@
2541 #ifndef _ASM_MICROBLAZE_CACHE_H
2542 #define _ASM_MICROBLAZE_CACHE_H
2543
2544 +#include <linux/const.h>
2545 #include <asm/registers.h>
2546
2547 #define L1_CACHE_SHIFT 5
2548 /* word-granular cache in microblaze */
2549 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2550 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2551
2552 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2553
2554 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2555 index 1d93f81..67794d0 100644
2556 --- a/arch/mips/include/asm/atomic.h
2557 +++ b/arch/mips/include/asm/atomic.h
2558 @@ -21,6 +21,10 @@
2559 #include <asm/war.h>
2560 #include <asm/system.h>
2561
2562 +#ifdef CONFIG_GENERIC_ATOMIC64
2563 +#include <asm-generic/atomic64.h>
2564 +#endif
2565 +
2566 #define ATOMIC_INIT(i) { (i) }
2567
2568 /*
2569 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2570 */
2571 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2572
2573 +#define atomic64_read_unchecked(v) atomic64_read(v)
2574 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2575 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2576 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2577 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2578 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2579 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2580 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2581 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2582 +
2583 #endif /* CONFIG_64BIT */
2584
2585 /*
2586 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2587 index b4db69f..8f3b093 100644
2588 --- a/arch/mips/include/asm/cache.h
2589 +++ b/arch/mips/include/asm/cache.h
2590 @@ -9,10 +9,11 @@
2591 #ifndef _ASM_CACHE_H
2592 #define _ASM_CACHE_H
2593
2594 +#include <linux/const.h>
2595 #include <kmalloc.h>
2596
2597 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2598 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2599 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2600
2601 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2602 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2603 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2604 index 455c0ac..ad65fbe 100644
2605 --- a/arch/mips/include/asm/elf.h
2606 +++ b/arch/mips/include/asm/elf.h
2607 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2608 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2609 #endif
2610
2611 +#ifdef CONFIG_PAX_ASLR
2612 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2613 +
2614 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2615 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2616 +#endif
2617 +
2618 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2619 struct linux_binprm;
2620 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2621 int uses_interp);
2622
2623 -struct mm_struct;
2624 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2625 -#define arch_randomize_brk arch_randomize_brk
2626 -
2627 #endif /* _ASM_ELF_H */
2628 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2629 index e59cd1a..8e329d6 100644
2630 --- a/arch/mips/include/asm/page.h
2631 +++ b/arch/mips/include/asm/page.h
2632 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2633 #ifdef CONFIG_CPU_MIPS32
2634 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2635 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2636 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2637 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2638 #else
2639 typedef struct { unsigned long long pte; } pte_t;
2640 #define pte_val(x) ((x).pte)
2641 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2642 index 6018c80..7c37203 100644
2643 --- a/arch/mips/include/asm/system.h
2644 +++ b/arch/mips/include/asm/system.h
2645 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2646 */
2647 #define __ARCH_WANT_UNLOCKED_CTXSW
2648
2649 -extern unsigned long arch_align_stack(unsigned long sp);
2650 +#define arch_align_stack(x) ((x) & ~0xfUL)
2651
2652 #endif /* _ASM_SYSTEM_H */
2653 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2654 index 9fdd8bc..4bd7f1a 100644
2655 --- a/arch/mips/kernel/binfmt_elfn32.c
2656 +++ b/arch/mips/kernel/binfmt_elfn32.c
2657 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2658 #undef ELF_ET_DYN_BASE
2659 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2660
2661 +#ifdef CONFIG_PAX_ASLR
2662 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2663 +
2664 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2665 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2666 +#endif
2667 +
2668 #include <asm/processor.h>
2669 #include <linux/module.h>
2670 #include <linux/elfcore.h>
2671 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2672 index ff44823..97f8906 100644
2673 --- a/arch/mips/kernel/binfmt_elfo32.c
2674 +++ b/arch/mips/kernel/binfmt_elfo32.c
2675 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2676 #undef ELF_ET_DYN_BASE
2677 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2678
2679 +#ifdef CONFIG_PAX_ASLR
2680 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2681 +
2682 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2683 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2684 +#endif
2685 +
2686 #include <asm/processor.h>
2687
2688 /*
2689 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2690 index c47f96e..661d418 100644
2691 --- a/arch/mips/kernel/process.c
2692 +++ b/arch/mips/kernel/process.c
2693 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
2694 out:
2695 return pc;
2696 }
2697 -
2698 -/*
2699 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2700 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2701 - */
2702 -unsigned long arch_align_stack(unsigned long sp)
2703 -{
2704 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2705 - sp -= get_random_int() & ~PAGE_MASK;
2706 -
2707 - return sp & ALMASK;
2708 -}
2709 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2710 index 937cf33..adb39bb 100644
2711 --- a/arch/mips/mm/fault.c
2712 +++ b/arch/mips/mm/fault.c
2713 @@ -28,6 +28,23 @@
2714 #include <asm/highmem.h> /* For VMALLOC_END */
2715 #include <linux/kdebug.h>
2716
2717 +#ifdef CONFIG_PAX_PAGEEXEC
2718 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2719 +{
2720 + unsigned long i;
2721 +
2722 + printk(KERN_ERR "PAX: bytes at PC: ");
2723 + for (i = 0; i < 5; i++) {
2724 + unsigned int c;
2725 + if (get_user(c, (unsigned int *)pc+i))
2726 + printk(KERN_CONT "???????? ");
2727 + else
2728 + printk(KERN_CONT "%08x ", c);
2729 + }
2730 + printk("\n");
2731 +}
2732 +#endif
2733 +
2734 /*
2735 * This routine handles page faults. It determines the address,
2736 * and the problem, and then passes it off to one of the appropriate
2737 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
2738 index 302d779..7d35bf8 100644
2739 --- a/arch/mips/mm/mmap.c
2740 +++ b/arch/mips/mm/mmap.c
2741 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2742 do_color_align = 1;
2743
2744 /* requesting a specific address */
2745 +
2746 +#ifdef CONFIG_PAX_RANDMMAP
2747 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2748 +#endif
2749 +
2750 if (addr) {
2751 if (do_color_align)
2752 addr = COLOUR_ALIGN(addr, pgoff);
2753 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2754 addr = PAGE_ALIGN(addr);
2755
2756 vma = find_vma(mm, addr);
2757 - if (TASK_SIZE - len >= addr &&
2758 - (!vma || addr + len <= vma->vm_start))
2759 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
2760 return addr;
2761 }
2762
2763 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2764 /* At this point: (!vma || addr < vma->vm_end). */
2765 if (TASK_SIZE - len < addr)
2766 return -ENOMEM;
2767 - if (!vma || addr + len <= vma->vm_start)
2768 + if (check_heap_stack_gap(vmm, addr, len))
2769 return addr;
2770 addr = vma->vm_end;
2771 if (do_color_align)
2772 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2773 /* make sure it can fit in the remaining address space */
2774 if (likely(addr > len)) {
2775 vma = find_vma(mm, addr - len);
2776 - if (!vma || addr <= vma->vm_start) {
2777 + if (check_heap_stack_gap(vmm, addr - len, len))
2778 /* cache the address as a hint for next time */
2779 return mm->free_area_cache = addr - len;
2780 }
2781 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2782 * return with success:
2783 */
2784 vma = find_vma(mm, addr);
2785 - if (likely(!vma || addr + len <= vma->vm_start)) {
2786 + if (check_heap_stack_gap(vmm, addr, len)) {
2787 /* cache the address as a hint for next time */
2788 return mm->free_area_cache = addr;
2789 }
2790 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2791 mm->unmap_area = arch_unmap_area_topdown;
2792 }
2793 }
2794 -
2795 -static inline unsigned long brk_rnd(void)
2796 -{
2797 - unsigned long rnd = get_random_int();
2798 -
2799 - rnd = rnd << PAGE_SHIFT;
2800 - /* 8MB for 32bit, 256MB for 64bit */
2801 - if (TASK_IS_32BIT_ADDR)
2802 - rnd = rnd & 0x7ffffful;
2803 - else
2804 - rnd = rnd & 0xffffffful;
2805 -
2806 - return rnd;
2807 -}
2808 -
2809 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2810 -{
2811 - unsigned long base = mm->brk;
2812 - unsigned long ret;
2813 -
2814 - ret = PAGE_ALIGN(base + brk_rnd());
2815 -
2816 - if (ret < mm->brk)
2817 - return mm->brk;
2818 -
2819 - return ret;
2820 -}
2821 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2822 index 967d144..db12197 100644
2823 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2824 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2825 @@ -11,12 +11,14 @@
2826 #ifndef _ASM_PROC_CACHE_H
2827 #define _ASM_PROC_CACHE_H
2828
2829 +#include <linux/const.h>
2830 +
2831 /* L1 cache */
2832
2833 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2834 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2835 -#define L1_CACHE_BYTES 16 /* bytes per entry */
2836 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2837 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2838 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2839
2840 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2841 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2842 index bcb5df2..84fabd2 100644
2843 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2844 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2845 @@ -16,13 +16,15 @@
2846 #ifndef _ASM_PROC_CACHE_H
2847 #define _ASM_PROC_CACHE_H
2848
2849 +#include <linux/const.h>
2850 +
2851 /*
2852 * L1 cache
2853 */
2854 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2855 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
2856 -#define L1_CACHE_BYTES 32 /* bytes per entry */
2857 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
2858 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2859 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
2860
2861 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2862 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
2863 index 4ce7a01..449202a 100644
2864 --- a/arch/openrisc/include/asm/cache.h
2865 +++ b/arch/openrisc/include/asm/cache.h
2866 @@ -19,11 +19,13 @@
2867 #ifndef __ASM_OPENRISC_CACHE_H
2868 #define __ASM_OPENRISC_CACHE_H
2869
2870 +#include <linux/const.h>
2871 +
2872 /* FIXME: How can we replace these with values from the CPU...
2873 * they shouldn't be hard-coded!
2874 */
2875
2876 -#define L1_CACHE_BYTES 16
2877 #define L1_CACHE_SHIFT 4
2878 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2879
2880 #endif /* __ASM_OPENRISC_CACHE_H */
2881 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2882 index 4054b31..a10c105 100644
2883 --- a/arch/parisc/include/asm/atomic.h
2884 +++ b/arch/parisc/include/asm/atomic.h
2885 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2886
2887 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2888
2889 +#define atomic64_read_unchecked(v) atomic64_read(v)
2890 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2891 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2892 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2893 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2894 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2895 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2896 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2897 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2898 +
2899 #endif /* !CONFIG_64BIT */
2900
2901
2902 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
2903 index 47f11c7..3420df2 100644
2904 --- a/arch/parisc/include/asm/cache.h
2905 +++ b/arch/parisc/include/asm/cache.h
2906 @@ -5,6 +5,7 @@
2907 #ifndef __ARCH_PARISC_CACHE_H
2908 #define __ARCH_PARISC_CACHE_H
2909
2910 +#include <linux/const.h>
2911
2912 /*
2913 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
2914 @@ -15,13 +16,13 @@
2915 * just ruin performance.
2916 */
2917 #ifdef CONFIG_PA20
2918 -#define L1_CACHE_BYTES 64
2919 #define L1_CACHE_SHIFT 6
2920 #else
2921 -#define L1_CACHE_BYTES 32
2922 #define L1_CACHE_SHIFT 5
2923 #endif
2924
2925 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2926 +
2927 #ifndef __ASSEMBLY__
2928
2929 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2930 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2931 index 19f6cb1..6c78cf2 100644
2932 --- a/arch/parisc/include/asm/elf.h
2933 +++ b/arch/parisc/include/asm/elf.h
2934 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
2935
2936 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2937
2938 +#ifdef CONFIG_PAX_ASLR
2939 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2940 +
2941 +#define PAX_DELTA_MMAP_LEN 16
2942 +#define PAX_DELTA_STACK_LEN 16
2943 +#endif
2944 +
2945 /* This yields a mask that user programs can use to figure out what
2946 instruction set this CPU supports. This could be done in user space,
2947 but it's not easy, and we've already done it here. */
2948 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2949 index 22dadeb..f6c2be4 100644
2950 --- a/arch/parisc/include/asm/pgtable.h
2951 +++ b/arch/parisc/include/asm/pgtable.h
2952 @@ -210,6 +210,17 @@ struct vm_area_struct;
2953 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2954 #define PAGE_COPY PAGE_EXECREAD
2955 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2956 +
2957 +#ifdef CONFIG_PAX_PAGEEXEC
2958 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2959 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2960 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2961 +#else
2962 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2963 +# define PAGE_COPY_NOEXEC PAGE_COPY
2964 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2965 +#endif
2966 +
2967 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2968 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
2969 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
2970 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2971 index 5e34ccf..672bc9c 100644
2972 --- a/arch/parisc/kernel/module.c
2973 +++ b/arch/parisc/kernel/module.c
2974 @@ -98,16 +98,38 @@
2975
2976 /* three functions to determine where in the module core
2977 * or init pieces the location is */
2978 +static inline int in_init_rx(struct module *me, void *loc)
2979 +{
2980 + return (loc >= me->module_init_rx &&
2981 + loc < (me->module_init_rx + me->init_size_rx));
2982 +}
2983 +
2984 +static inline int in_init_rw(struct module *me, void *loc)
2985 +{
2986 + return (loc >= me->module_init_rw &&
2987 + loc < (me->module_init_rw + me->init_size_rw));
2988 +}
2989 +
2990 static inline int in_init(struct module *me, void *loc)
2991 {
2992 - return (loc >= me->module_init &&
2993 - loc <= (me->module_init + me->init_size));
2994 + return in_init_rx(me, loc) || in_init_rw(me, loc);
2995 +}
2996 +
2997 +static inline int in_core_rx(struct module *me, void *loc)
2998 +{
2999 + return (loc >= me->module_core_rx &&
3000 + loc < (me->module_core_rx + me->core_size_rx));
3001 +}
3002 +
3003 +static inline int in_core_rw(struct module *me, void *loc)
3004 +{
3005 + return (loc >= me->module_core_rw &&
3006 + loc < (me->module_core_rw + me->core_size_rw));
3007 }
3008
3009 static inline int in_core(struct module *me, void *loc)
3010 {
3011 - return (loc >= me->module_core &&
3012 - loc <= (me->module_core + me->core_size));
3013 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3014 }
3015
3016 static inline int in_local(struct module *me, void *loc)
3017 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3018 }
3019
3020 /* align things a bit */
3021 - me->core_size = ALIGN(me->core_size, 16);
3022 - me->arch.got_offset = me->core_size;
3023 - me->core_size += gots * sizeof(struct got_entry);
3024 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3025 + me->arch.got_offset = me->core_size_rw;
3026 + me->core_size_rw += gots * sizeof(struct got_entry);
3027
3028 - me->core_size = ALIGN(me->core_size, 16);
3029 - me->arch.fdesc_offset = me->core_size;
3030 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3031 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3032 + me->arch.fdesc_offset = me->core_size_rw;
3033 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3034
3035 me->arch.got_max = gots;
3036 me->arch.fdesc_max = fdescs;
3037 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3038
3039 BUG_ON(value == 0);
3040
3041 - got = me->module_core + me->arch.got_offset;
3042 + got = me->module_core_rw + me->arch.got_offset;
3043 for (i = 0; got[i].addr; i++)
3044 if (got[i].addr == value)
3045 goto out;
3046 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3047 #ifdef CONFIG_64BIT
3048 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3049 {
3050 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3051 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3052
3053 if (!value) {
3054 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3055 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3056
3057 /* Create new one */
3058 fdesc->addr = value;
3059 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3060 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3061 return (Elf_Addr)fdesc;
3062 }
3063 #endif /* CONFIG_64BIT */
3064 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3065
3066 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3067 end = table + sechdrs[me->arch.unwind_section].sh_size;
3068 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3069 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3070
3071 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3072 me->arch.unwind_section, table, end, gp);
3073 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3074 index c9b9322..02d8940 100644
3075 --- a/arch/parisc/kernel/sys_parisc.c
3076 +++ b/arch/parisc/kernel/sys_parisc.c
3077 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3078 /* At this point: (!vma || addr < vma->vm_end). */
3079 if (TASK_SIZE - len < addr)
3080 return -ENOMEM;
3081 - if (!vma || addr + len <= vma->vm_start)
3082 + if (check_heap_stack_gap(vma, addr, len))
3083 return addr;
3084 addr = vma->vm_end;
3085 }
3086 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3087 /* At this point: (!vma || addr < vma->vm_end). */
3088 if (TASK_SIZE - len < addr)
3089 return -ENOMEM;
3090 - if (!vma || addr + len <= vma->vm_start)
3091 + if (check_heap_stack_gap(vma, addr, len))
3092 return addr;
3093 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3094 if (addr < vma->vm_end) /* handle wraparound */
3095 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3096 if (flags & MAP_FIXED)
3097 return addr;
3098 if (!addr)
3099 - addr = TASK_UNMAPPED_BASE;
3100 + addr = current->mm->mmap_base;
3101
3102 if (filp) {
3103 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3104 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3105 index f19e660..414fe24 100644
3106 --- a/arch/parisc/kernel/traps.c
3107 +++ b/arch/parisc/kernel/traps.c
3108 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3109
3110 down_read(&current->mm->mmap_sem);
3111 vma = find_vma(current->mm,regs->iaoq[0]);
3112 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3113 - && (vma->vm_flags & VM_EXEC)) {
3114 -
3115 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3116 fault_address = regs->iaoq[0];
3117 fault_space = regs->iasq[0];
3118
3119 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3120 index 18162ce..94de376 100644
3121 --- a/arch/parisc/mm/fault.c
3122 +++ b/arch/parisc/mm/fault.c
3123 @@ -15,6 +15,7 @@
3124 #include <linux/sched.h>
3125 #include <linux/interrupt.h>
3126 #include <linux/module.h>
3127 +#include <linux/unistd.h>
3128
3129 #include <asm/uaccess.h>
3130 #include <asm/traps.h>
3131 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3132 static unsigned long
3133 parisc_acctyp(unsigned long code, unsigned int inst)
3134 {
3135 - if (code == 6 || code == 16)
3136 + if (code == 6 || code == 7 || code == 16)
3137 return VM_EXEC;
3138
3139 switch (inst & 0xf0000000) {
3140 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3141 }
3142 #endif
3143
3144 +#ifdef CONFIG_PAX_PAGEEXEC
3145 +/*
3146 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3147 + *
3148 + * returns 1 when task should be killed
3149 + * 2 when rt_sigreturn trampoline was detected
3150 + * 3 when unpatched PLT trampoline was detected
3151 + */
3152 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3153 +{
3154 +
3155 +#ifdef CONFIG_PAX_EMUPLT
3156 + int err;
3157 +
3158 + do { /* PaX: unpatched PLT emulation */
3159 + unsigned int bl, depwi;
3160 +
3161 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3162 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3163 +
3164 + if (err)
3165 + break;
3166 +
3167 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3168 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3169 +
3170 + err = get_user(ldw, (unsigned int *)addr);
3171 + err |= get_user(bv, (unsigned int *)(addr+4));
3172 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3173 +
3174 + if (err)
3175 + break;
3176 +
3177 + if (ldw == 0x0E801096U &&
3178 + bv == 0xEAC0C000U &&
3179 + ldw2 == 0x0E881095U)
3180 + {
3181 + unsigned int resolver, map;
3182 +
3183 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3184 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3185 + if (err)
3186 + break;
3187 +
3188 + regs->gr[20] = instruction_pointer(regs)+8;
3189 + regs->gr[21] = map;
3190 + regs->gr[22] = resolver;
3191 + regs->iaoq[0] = resolver | 3UL;
3192 + regs->iaoq[1] = regs->iaoq[0] + 4;
3193 + return 3;
3194 + }
3195 + }
3196 + } while (0);
3197 +#endif
3198 +
3199 +#ifdef CONFIG_PAX_EMUTRAMP
3200 +
3201 +#ifndef CONFIG_PAX_EMUSIGRT
3202 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3203 + return 1;
3204 +#endif
3205 +
3206 + do { /* PaX: rt_sigreturn emulation */
3207 + unsigned int ldi1, ldi2, bel, nop;
3208 +
3209 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3210 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3211 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3212 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3213 +
3214 + if (err)
3215 + break;
3216 +
3217 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3218 + ldi2 == 0x3414015AU &&
3219 + bel == 0xE4008200U &&
3220 + nop == 0x08000240U)
3221 + {
3222 + regs->gr[25] = (ldi1 & 2) >> 1;
3223 + regs->gr[20] = __NR_rt_sigreturn;
3224 + regs->gr[31] = regs->iaoq[1] + 16;
3225 + regs->sr[0] = regs->iasq[1];
3226 + regs->iaoq[0] = 0x100UL;
3227 + regs->iaoq[1] = regs->iaoq[0] + 4;
3228 + regs->iasq[0] = regs->sr[2];
3229 + regs->iasq[1] = regs->sr[2];
3230 + return 2;
3231 + }
3232 + } while (0);
3233 +#endif
3234 +
3235 + return 1;
3236 +}
3237 +
3238 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3239 +{
3240 + unsigned long i;
3241 +
3242 + printk(KERN_ERR "PAX: bytes at PC: ");
3243 + for (i = 0; i < 5; i++) {
3244 + unsigned int c;
3245 + if (get_user(c, (unsigned int *)pc+i))
3246 + printk(KERN_CONT "???????? ");
3247 + else
3248 + printk(KERN_CONT "%08x ", c);
3249 + }
3250 + printk("\n");
3251 +}
3252 +#endif
3253 +
3254 int fixup_exception(struct pt_regs *regs)
3255 {
3256 const struct exception_table_entry *fix;
3257 @@ -192,8 +303,33 @@ good_area:
3258
3259 acc_type = parisc_acctyp(code,regs->iir);
3260
3261 - if ((vma->vm_flags & acc_type) != acc_type)
3262 + if ((vma->vm_flags & acc_type) != acc_type) {
3263 +
3264 +#ifdef CONFIG_PAX_PAGEEXEC
3265 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3266 + (address & ~3UL) == instruction_pointer(regs))
3267 + {
3268 + up_read(&mm->mmap_sem);
3269 + switch (pax_handle_fetch_fault(regs)) {
3270 +
3271 +#ifdef CONFIG_PAX_EMUPLT
3272 + case 3:
3273 + return;
3274 +#endif
3275 +
3276 +#ifdef CONFIG_PAX_EMUTRAMP
3277 + case 2:
3278 + return;
3279 +#endif
3280 +
3281 + }
3282 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3283 + do_group_exit(SIGKILL);
3284 + }
3285 +#endif
3286 +
3287 goto bad_area;
3288 + }
3289
3290 /*
3291 * If for any reason at all we couldn't handle the fault, make
3292 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3293 index 02e41b5..ec6e26c 100644
3294 --- a/arch/powerpc/include/asm/atomic.h
3295 +++ b/arch/powerpc/include/asm/atomic.h
3296 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3297
3298 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3299
3300 +#define atomic64_read_unchecked(v) atomic64_read(v)
3301 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3302 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3303 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3304 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3305 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3306 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3307 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3308 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3309 +
3310 #endif /* __powerpc64__ */
3311
3312 #endif /* __KERNEL__ */
3313 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3314 index 4b50941..5605819 100644
3315 --- a/arch/powerpc/include/asm/cache.h
3316 +++ b/arch/powerpc/include/asm/cache.h
3317 @@ -3,6 +3,7 @@
3318
3319 #ifdef __KERNEL__
3320
3321 +#include <linux/const.h>
3322
3323 /* bytes per L1 cache line */
3324 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3325 @@ -22,7 +23,7 @@
3326 #define L1_CACHE_SHIFT 7
3327 #endif
3328
3329 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3330 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3331
3332 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3333
3334 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3335 index 3bf9cca..e7457d0 100644
3336 --- a/arch/powerpc/include/asm/elf.h
3337 +++ b/arch/powerpc/include/asm/elf.h
3338 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3339 the loader. We need to make sure that it is out of the way of the program
3340 that it will "exec", and that there is sufficient room for the brk. */
3341
3342 -extern unsigned long randomize_et_dyn(unsigned long base);
3343 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3344 +#define ELF_ET_DYN_BASE (0x20000000)
3345 +
3346 +#ifdef CONFIG_PAX_ASLR
3347 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3348 +
3349 +#ifdef __powerpc64__
3350 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3351 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3352 +#else
3353 +#define PAX_DELTA_MMAP_LEN 15
3354 +#define PAX_DELTA_STACK_LEN 15
3355 +#endif
3356 +#endif
3357
3358 /*
3359 * Our registers are always unsigned longs, whether we're a 32 bit
3360 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3361 (0x7ff >> (PAGE_SHIFT - 12)) : \
3362 (0x3ffff >> (PAGE_SHIFT - 12)))
3363
3364 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3365 -#define arch_randomize_brk arch_randomize_brk
3366 -
3367 #endif /* __KERNEL__ */
3368
3369 /*
3370 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3371 index bca8fdc..61e9580 100644
3372 --- a/arch/powerpc/include/asm/kmap_types.h
3373 +++ b/arch/powerpc/include/asm/kmap_types.h
3374 @@ -27,6 +27,7 @@ enum km_type {
3375 KM_PPC_SYNC_PAGE,
3376 KM_PPC_SYNC_ICACHE,
3377 KM_KDB,
3378 + KM_CLEARPAGE,
3379 KM_TYPE_NR
3380 };
3381
3382 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3383 index d4a7f64..451de1c 100644
3384 --- a/arch/powerpc/include/asm/mman.h
3385 +++ b/arch/powerpc/include/asm/mman.h
3386 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3387 }
3388 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3389
3390 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3391 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3392 {
3393 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3394 }
3395 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3396 index dd9c4fd..a2ced87 100644
3397 --- a/arch/powerpc/include/asm/page.h
3398 +++ b/arch/powerpc/include/asm/page.h
3399 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
3400 * and needs to be executable. This means the whole heap ends
3401 * up being executable.
3402 */
3403 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3404 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3405 +#define VM_DATA_DEFAULT_FLAGS32 \
3406 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3407 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3408
3409 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3410 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3411 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
3412 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3413 #endif
3414
3415 +#define ktla_ktva(addr) (addr)
3416 +#define ktva_ktla(addr) (addr)
3417 +
3418 /*
3419 * Use the top bit of the higher-level page table entries to indicate whether
3420 * the entries we point to contain hugepages. This works because we know that
3421 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3422 index fb40ede..d3ce956 100644
3423 --- a/arch/powerpc/include/asm/page_64.h
3424 +++ b/arch/powerpc/include/asm/page_64.h
3425 @@ -144,15 +144,18 @@ do { \
3426 * stack by default, so in the absence of a PT_GNU_STACK program header
3427 * we turn execute permission off.
3428 */
3429 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3430 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3431 +#define VM_STACK_DEFAULT_FLAGS32 \
3432 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3433 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3434
3435 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3436 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3437
3438 +#ifndef CONFIG_PAX_PAGEEXEC
3439 #define VM_STACK_DEFAULT_FLAGS \
3440 (is_32bit_task() ? \
3441 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3442 +#endif
3443
3444 #include <asm-generic/getorder.h>
3445
3446 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3447 index 88b0bd9..e32bc67 100644
3448 --- a/arch/powerpc/include/asm/pgtable.h
3449 +++ b/arch/powerpc/include/asm/pgtable.h
3450 @@ -2,6 +2,7 @@
3451 #define _ASM_POWERPC_PGTABLE_H
3452 #ifdef __KERNEL__
3453
3454 +#include <linux/const.h>
3455 #ifndef __ASSEMBLY__
3456 #include <asm/processor.h> /* For TASK_SIZE */
3457 #include <asm/mmu.h>
3458 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3459 index 4aad413..85d86bf 100644
3460 --- a/arch/powerpc/include/asm/pte-hash32.h
3461 +++ b/arch/powerpc/include/asm/pte-hash32.h
3462 @@ -21,6 +21,7 @@
3463 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3464 #define _PAGE_USER 0x004 /* usermode access allowed */
3465 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3466 +#define _PAGE_EXEC _PAGE_GUARDED
3467 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3468 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3469 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3470 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3471 index 559da19..7e5835c 100644
3472 --- a/arch/powerpc/include/asm/reg.h
3473 +++ b/arch/powerpc/include/asm/reg.h
3474 @@ -212,6 +212,7 @@
3475 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3476 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3477 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3478 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3479 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3480 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3481 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3482 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3483 index e30a13d..2b7d994 100644
3484 --- a/arch/powerpc/include/asm/system.h
3485 +++ b/arch/powerpc/include/asm/system.h
3486 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3487 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3488 #endif
3489
3490 -extern unsigned long arch_align_stack(unsigned long sp);
3491 +#define arch_align_stack(x) ((x) & ~0xfUL)
3492
3493 /* Used in very early kernel initialization. */
3494 extern unsigned long reloc_offset(void);
3495 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3496 index bd0fb84..a42a14b 100644
3497 --- a/arch/powerpc/include/asm/uaccess.h
3498 +++ b/arch/powerpc/include/asm/uaccess.h
3499 @@ -13,6 +13,8 @@
3500 #define VERIFY_READ 0
3501 #define VERIFY_WRITE 1
3502
3503 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3504 +
3505 /*
3506 * The fs value determines whether argument validity checking should be
3507 * performed or not. If get_fs() == USER_DS, checking is performed, with
3508 @@ -327,52 +329,6 @@ do { \
3509 extern unsigned long __copy_tofrom_user(void __user *to,
3510 const void __user *from, unsigned long size);
3511
3512 -#ifndef __powerpc64__
3513 -
3514 -static inline unsigned long copy_from_user(void *to,
3515 - const void __user *from, unsigned long n)
3516 -{
3517 - unsigned long over;
3518 -
3519 - if (access_ok(VERIFY_READ, from, n))
3520 - return __copy_tofrom_user((__force void __user *)to, from, n);
3521 - if ((unsigned long)from < TASK_SIZE) {
3522 - over = (unsigned long)from + n - TASK_SIZE;
3523 - return __copy_tofrom_user((__force void __user *)to, from,
3524 - n - over) + over;
3525 - }
3526 - return n;
3527 -}
3528 -
3529 -static inline unsigned long copy_to_user(void __user *to,
3530 - const void *from, unsigned long n)
3531 -{
3532 - unsigned long over;
3533 -
3534 - if (access_ok(VERIFY_WRITE, to, n))
3535 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3536 - if ((unsigned long)to < TASK_SIZE) {
3537 - over = (unsigned long)to + n - TASK_SIZE;
3538 - return __copy_tofrom_user(to, (__force void __user *)from,
3539 - n - over) + over;
3540 - }
3541 - return n;
3542 -}
3543 -
3544 -#else /* __powerpc64__ */
3545 -
3546 -#define __copy_in_user(to, from, size) \
3547 - __copy_tofrom_user((to), (from), (size))
3548 -
3549 -extern unsigned long copy_from_user(void *to, const void __user *from,
3550 - unsigned long n);
3551 -extern unsigned long copy_to_user(void __user *to, const void *from,
3552 - unsigned long n);
3553 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3554 - unsigned long n);
3555 -
3556 -#endif /* __powerpc64__ */
3557 -
3558 static inline unsigned long __copy_from_user_inatomic(void *to,
3559 const void __user *from, unsigned long n)
3560 {
3561 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3562 if (ret == 0)
3563 return 0;
3564 }
3565 +
3566 + if (!__builtin_constant_p(n))
3567 + check_object_size(to, n, false);
3568 +
3569 return __copy_tofrom_user((__force void __user *)to, from, n);
3570 }
3571
3572 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3573 if (ret == 0)
3574 return 0;
3575 }
3576 +
3577 + if (!__builtin_constant_p(n))
3578 + check_object_size(from, n, true);
3579 +
3580 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3581 }
3582
3583 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3584 return __copy_to_user_inatomic(to, from, size);
3585 }
3586
3587 +#ifndef __powerpc64__
3588 +
3589 +static inline unsigned long __must_check copy_from_user(void *to,
3590 + const void __user *from, unsigned long n)
3591 +{
3592 + unsigned long over;
3593 +
3594 + if ((long)n < 0)
3595 + return n;
3596 +
3597 + if (access_ok(VERIFY_READ, from, n)) {
3598 + if (!__builtin_constant_p(n))
3599 + check_object_size(to, n, false);
3600 + return __copy_tofrom_user((__force void __user *)to, from, n);
3601 + }
3602 + if ((unsigned long)from < TASK_SIZE) {
3603 + over = (unsigned long)from + n - TASK_SIZE;
3604 + if (!__builtin_constant_p(n - over))
3605 + check_object_size(to, n - over, false);
3606 + return __copy_tofrom_user((__force void __user *)to, from,
3607 + n - over) + over;
3608 + }
3609 + return n;
3610 +}
3611 +
3612 +static inline unsigned long __must_check copy_to_user(void __user *to,
3613 + const void *from, unsigned long n)
3614 +{
3615 + unsigned long over;
3616 +
3617 + if ((long)n < 0)
3618 + return n;
3619 +
3620 + if (access_ok(VERIFY_WRITE, to, n)) {
3621 + if (!__builtin_constant_p(n))
3622 + check_object_size(from, n, true);
3623 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3624 + }
3625 + if ((unsigned long)to < TASK_SIZE) {
3626 + over = (unsigned long)to + n - TASK_SIZE;
3627 + if (!__builtin_constant_p(n))
3628 + check_object_size(from, n - over, true);
3629 + return __copy_tofrom_user(to, (__force void __user *)from,
3630 + n - over) + over;
3631 + }
3632 + return n;
3633 +}
3634 +
3635 +#else /* __powerpc64__ */
3636 +
3637 +#define __copy_in_user(to, from, size) \
3638 + __copy_tofrom_user((to), (from), (size))
3639 +
3640 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3641 +{
3642 + if ((long)n < 0 || n > INT_MAX)
3643 + return n;
3644 +
3645 + if (!__builtin_constant_p(n))
3646 + check_object_size(to, n, false);
3647 +
3648 + if (likely(access_ok(VERIFY_READ, from, n)))
3649 + n = __copy_from_user(to, from, n);
3650 + else
3651 + memset(to, 0, n);
3652 + return n;
3653 +}
3654 +
3655 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3656 +{
3657 + if ((long)n < 0 || n > INT_MAX)
3658 + return n;
3659 +
3660 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3661 + if (!__builtin_constant_p(n))
3662 + check_object_size(from, n, true);
3663 + n = __copy_to_user(to, from, n);
3664 + }
3665 + return n;
3666 +}
3667 +
3668 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3669 + unsigned long n);
3670 +
3671 +#endif /* __powerpc64__ */
3672 +
3673 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3674
3675 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3676 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3677 index 429983c..7af363b 100644
3678 --- a/arch/powerpc/kernel/exceptions-64e.S
3679 +++ b/arch/powerpc/kernel/exceptions-64e.S
3680 @@ -587,6 +587,7 @@ storage_fault_common:
3681 std r14,_DAR(r1)
3682 std r15,_DSISR(r1)
3683 addi r3,r1,STACK_FRAME_OVERHEAD
3684 + bl .save_nvgprs
3685 mr r4,r14
3686 mr r5,r15
3687 ld r14,PACA_EXGEN+EX_R14(r13)
3688 @@ -596,8 +597,7 @@ storage_fault_common:
3689 cmpdi r3,0
3690 bne- 1f
3691 b .ret_from_except_lite
3692 -1: bl .save_nvgprs
3693 - mr r5,r3
3694 +1: mr r5,r3
3695 addi r3,r1,STACK_FRAME_OVERHEAD
3696 ld r4,_DAR(r1)
3697 bl .bad_page_fault
3698 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3699 index cf9c69b..ebc9640 100644
3700 --- a/arch/powerpc/kernel/exceptions-64s.S
3701 +++ b/arch/powerpc/kernel/exceptions-64s.S
3702 @@ -1004,10 +1004,10 @@ handle_page_fault:
3703 11: ld r4,_DAR(r1)
3704 ld r5,_DSISR(r1)
3705 addi r3,r1,STACK_FRAME_OVERHEAD
3706 + bl .save_nvgprs
3707 bl .do_page_fault
3708 cmpdi r3,0
3709 beq+ 13f
3710 - bl .save_nvgprs
3711 mr r5,r3
3712 addi r3,r1,STACK_FRAME_OVERHEAD
3713 lwz r4,_DAR(r1)
3714 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
3715 index 745c1e7..59d97a6 100644
3716 --- a/arch/powerpc/kernel/irq.c
3717 +++ b/arch/powerpc/kernel/irq.c
3718 @@ -547,9 +547,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
3719 host->ops = ops;
3720 host->of_node = of_node_get(of_node);
3721
3722 - if (host->ops->match == NULL)
3723 - host->ops->match = default_irq_host_match;
3724 -
3725 raw_spin_lock_irqsave(&irq_big_lock, flags);
3726
3727 /* If it's a legacy controller, check for duplicates and
3728 @@ -622,7 +619,12 @@ struct irq_host *irq_find_host(struct device_node *node)
3729 */
3730 raw_spin_lock_irqsave(&irq_big_lock, flags);
3731 list_for_each_entry(h, &irq_hosts, link)
3732 - if (h->ops->match(h, node)) {
3733 + if (h->ops->match) {
3734 + if (h->ops->match(h, node)) {
3735 + found = h;
3736 + break;
3737 + }
3738 + } else if (default_irq_host_match(h, node)) {
3739 found = h;
3740 break;
3741 }
3742 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3743 index 0b6d796..d760ddb 100644
3744 --- a/arch/powerpc/kernel/module_32.c
3745 +++ b/arch/powerpc/kernel/module_32.c
3746 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3747 me->arch.core_plt_section = i;
3748 }
3749 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3750 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3751 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3752 return -ENOEXEC;
3753 }
3754
3755 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
3756
3757 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3758 /* Init, or core PLT? */
3759 - if (location >= mod->module_core
3760 - && location < mod->module_core + mod->core_size)
3761 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3762 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3763 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3764 - else
3765 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3766 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3767 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3768 + else {
3769 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3770 + return ~0UL;
3771 + }
3772
3773 /* Find this entry, or if that fails, the next avail. entry */
3774 while (entry->jump[0]) {
3775 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3776 index 6457574..08b28d3 100644
3777 --- a/arch/powerpc/kernel/process.c
3778 +++ b/arch/powerpc/kernel/process.c
3779 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
3780 * Lookup NIP late so we have the best change of getting the
3781 * above info out without failing
3782 */
3783 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3784 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3785 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3786 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3787 #endif
3788 show_stack(current, (unsigned long *) regs->gpr[1]);
3789 if (!user_mode(regs))
3790 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3791 newsp = stack[0];
3792 ip = stack[STACK_FRAME_LR_SAVE];
3793 if (!firstframe || ip != lr) {
3794 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3795 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3796 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3797 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3798 - printk(" (%pS)",
3799 + printk(" (%pA)",
3800 (void *)current->ret_stack[curr_frame].ret);
3801 curr_frame--;
3802 }
3803 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3804 struct pt_regs *regs = (struct pt_regs *)
3805 (sp + STACK_FRAME_OVERHEAD);
3806 lr = regs->link;
3807 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3808 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3809 regs->trap, (void *)regs->nip, (void *)lr);
3810 firstframe = 1;
3811 }
3812 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
3813 }
3814
3815 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3816 -
3817 -unsigned long arch_align_stack(unsigned long sp)
3818 -{
3819 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3820 - sp -= get_random_int() & ~PAGE_MASK;
3821 - return sp & ~0xf;
3822 -}
3823 -
3824 -static inline unsigned long brk_rnd(void)
3825 -{
3826 - unsigned long rnd = 0;
3827 -
3828 - /* 8MB for 32bit, 1GB for 64bit */
3829 - if (is_32bit_task())
3830 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3831 - else
3832 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3833 -
3834 - return rnd << PAGE_SHIFT;
3835 -}
3836 -
3837 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3838 -{
3839 - unsigned long base = mm->brk;
3840 - unsigned long ret;
3841 -
3842 -#ifdef CONFIG_PPC_STD_MMU_64
3843 - /*
3844 - * If we are using 1TB segments and we are allowed to randomise
3845 - * the heap, we can put it above 1TB so it is backed by a 1TB
3846 - * segment. Otherwise the heap will be in the bottom 1TB
3847 - * which always uses 256MB segments and this may result in a
3848 - * performance penalty.
3849 - */
3850 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3851 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3852 -#endif
3853 -
3854 - ret = PAGE_ALIGN(base + brk_rnd());
3855 -
3856 - if (ret < mm->brk)
3857 - return mm->brk;
3858 -
3859 - return ret;
3860 -}
3861 -
3862 -unsigned long randomize_et_dyn(unsigned long base)
3863 -{
3864 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3865 -
3866 - if (ret < base)
3867 - return base;
3868 -
3869 - return ret;
3870 -}
3871 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3872 index 836a5a1..27289a3 100644
3873 --- a/arch/powerpc/kernel/signal_32.c
3874 +++ b/arch/powerpc/kernel/signal_32.c
3875 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3876 /* Save user registers on the stack */
3877 frame = &rt_sf->uc.uc_mcontext;
3878 addr = frame;
3879 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3880 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3881 if (save_user_regs(regs, frame, 0, 1))
3882 goto badframe;
3883 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3884 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3885 index a50b5ec..547078a 100644
3886 --- a/arch/powerpc/kernel/signal_64.c
3887 +++ b/arch/powerpc/kernel/signal_64.c
3888 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3889 current->thread.fpscr.val = 0;
3890
3891 /* Set up to return from userspace. */
3892 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3893 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3894 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3895 } else {
3896 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3897 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3898 index 5459d14..10f8070 100644
3899 --- a/arch/powerpc/kernel/traps.c
3900 +++ b/arch/powerpc/kernel/traps.c
3901 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
3902 static inline void pmac_backlight_unblank(void) { }
3903 #endif
3904
3905 +extern void gr_handle_kernel_exploit(void);
3906 +
3907 int die(const char *str, struct pt_regs *regs, long err)
3908 {
3909 static struct {
3910 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3911 if (panic_on_oops)
3912 panic("Fatal exception");
3913
3914 + gr_handle_kernel_exploit();
3915 +
3916 oops_exit();
3917 do_exit(err);
3918
3919 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3920 index 7d14bb6..1305601 100644
3921 --- a/arch/powerpc/kernel/vdso.c
3922 +++ b/arch/powerpc/kernel/vdso.c
3923 @@ -35,6 +35,7 @@
3924 #include <asm/firmware.h>
3925 #include <asm/vdso.h>
3926 #include <asm/vdso_datapage.h>
3927 +#include <asm/mman.h>
3928
3929 #include "setup.h"
3930
3931 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3932 vdso_base = VDSO32_MBASE;
3933 #endif
3934
3935 - current->mm->context.vdso_base = 0;
3936 + current->mm->context.vdso_base = ~0UL;
3937
3938 /* vDSO has a problem and was disabled, just don't "enable" it for the
3939 * process
3940 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3941 vdso_base = get_unmapped_area(NULL, vdso_base,
3942 (vdso_pages << PAGE_SHIFT) +
3943 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3944 - 0, 0);
3945 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3946 if (IS_ERR_VALUE(vdso_base)) {
3947 rc = vdso_base;
3948 goto fail_mmapsem;
3949 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3950 index 5eea6f3..5d10396 100644
3951 --- a/arch/powerpc/lib/usercopy_64.c
3952 +++ b/arch/powerpc/lib/usercopy_64.c
3953 @@ -9,22 +9,6 @@
3954 #include <linux/module.h>
3955 #include <asm/uaccess.h>
3956
3957 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3958 -{
3959 - if (likely(access_ok(VERIFY_READ, from, n)))
3960 - n = __copy_from_user(to, from, n);
3961 - else
3962 - memset(to, 0, n);
3963 - return n;
3964 -}
3965 -
3966 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3967 -{
3968 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3969 - n = __copy_to_user(to, from, n);
3970 - return n;
3971 -}
3972 -
3973 unsigned long copy_in_user(void __user *to, const void __user *from,
3974 unsigned long n)
3975 {
3976 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3977 return n;
3978 }
3979
3980 -EXPORT_SYMBOL(copy_from_user);
3981 -EXPORT_SYMBOL(copy_to_user);
3982 EXPORT_SYMBOL(copy_in_user);
3983
3984 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3985 index 5efe8c9..db9ceef 100644
3986 --- a/arch/powerpc/mm/fault.c
3987 +++ b/arch/powerpc/mm/fault.c
3988 @@ -32,6 +32,10 @@
3989 #include <linux/perf_event.h>
3990 #include <linux/magic.h>
3991 #include <linux/ratelimit.h>
3992 +#include <linux/slab.h>
3993 +#include <linux/pagemap.h>
3994 +#include <linux/compiler.h>
3995 +#include <linux/unistd.h>
3996
3997 #include <asm/firmware.h>
3998 #include <asm/page.h>
3999 @@ -43,6 +47,7 @@
4000 #include <asm/tlbflush.h>
4001 #include <asm/siginfo.h>
4002 #include <mm/mmu_decl.h>
4003 +#include <asm/ptrace.h>
4004
4005 #ifdef CONFIG_KPROBES
4006 static inline int notify_page_fault(struct pt_regs *regs)
4007 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4008 }
4009 #endif
4010
4011 +#ifdef CONFIG_PAX_PAGEEXEC
4012 +/*
4013 + * PaX: decide what to do with offenders (regs->nip = fault address)
4014 + *
4015 + * returns 1 when task should be killed
4016 + */
4017 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4018 +{
4019 + return 1;
4020 +}
4021 +
4022 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4023 +{
4024 + unsigned long i;
4025 +
4026 + printk(KERN_ERR "PAX: bytes at PC: ");
4027 + for (i = 0; i < 5; i++) {
4028 + unsigned int c;
4029 + if (get_user(c, (unsigned int __user *)pc+i))
4030 + printk(KERN_CONT "???????? ");
4031 + else
4032 + printk(KERN_CONT "%08x ", c);
4033 + }
4034 + printk("\n");
4035 +}
4036 +#endif
4037 +
4038 /*
4039 * Check whether the instruction at regs->nip is a store using
4040 * an update addressing form which will update r1.
4041 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4042 * indicate errors in DSISR but can validly be set in SRR1.
4043 */
4044 if (trap == 0x400)
4045 - error_code &= 0x48200000;
4046 + error_code &= 0x58200000;
4047 else
4048 is_write = error_code & DSISR_ISSTORE;
4049 #else
4050 @@ -259,7 +291,7 @@ good_area:
4051 * "undefined". Of those that can be set, this is the only
4052 * one which seems bad.
4053 */
4054 - if (error_code & 0x10000000)
4055 + if (error_code & DSISR_GUARDED)
4056 /* Guarded storage error. */
4057 goto bad_area;
4058 #endif /* CONFIG_8xx */
4059 @@ -274,7 +306,7 @@ good_area:
4060 * processors use the same I/D cache coherency mechanism
4061 * as embedded.
4062 */
4063 - if (error_code & DSISR_PROTFAULT)
4064 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4065 goto bad_area;
4066 #endif /* CONFIG_PPC_STD_MMU */
4067
4068 @@ -343,6 +375,23 @@ bad_area:
4069 bad_area_nosemaphore:
4070 /* User mode accesses cause a SIGSEGV */
4071 if (user_mode(regs)) {
4072 +
4073 +#ifdef CONFIG_PAX_PAGEEXEC
4074 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4075 +#ifdef CONFIG_PPC_STD_MMU
4076 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4077 +#else
4078 + if (is_exec && regs->nip == address) {
4079 +#endif
4080 + switch (pax_handle_fetch_fault(regs)) {
4081 + }
4082 +
4083 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4084 + do_group_exit(SIGKILL);
4085 + }
4086 + }
4087 +#endif
4088 +
4089 _exception(SIGSEGV, regs, code, address);
4090 return 0;
4091 }
4092 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4093 index 5a783d8..c23e14b 100644
4094 --- a/arch/powerpc/mm/mmap_64.c
4095 +++ b/arch/powerpc/mm/mmap_64.c
4096 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4097 */
4098 if (mmap_is_legacy()) {
4099 mm->mmap_base = TASK_UNMAPPED_BASE;
4100 +
4101 +#ifdef CONFIG_PAX_RANDMMAP
4102 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4103 + mm->mmap_base += mm->delta_mmap;
4104 +#endif
4105 +
4106 mm->get_unmapped_area = arch_get_unmapped_area;
4107 mm->unmap_area = arch_unmap_area;
4108 } else {
4109 mm->mmap_base = mmap_base();
4110 +
4111 +#ifdef CONFIG_PAX_RANDMMAP
4112 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4113 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4114 +#endif
4115 +
4116 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4117 mm->unmap_area = arch_unmap_area_topdown;
4118 }
4119 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4120 index 73709f7..6b90313 100644
4121 --- a/arch/powerpc/mm/slice.c
4122 +++ b/arch/powerpc/mm/slice.c
4123 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4124 if ((mm->task_size - len) < addr)
4125 return 0;
4126 vma = find_vma(mm, addr);
4127 - return (!vma || (addr + len) <= vma->vm_start);
4128 + return check_heap_stack_gap(vma, addr, len);
4129 }
4130
4131 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4132 @@ -256,7 +256,7 @@ full_search:
4133 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4134 continue;
4135 }
4136 - if (!vma || addr + len <= vma->vm_start) {
4137 + if (check_heap_stack_gap(vma, addr, len)) {
4138 /*
4139 * Remember the place where we stopped the search:
4140 */
4141 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4142 }
4143 }
4144
4145 - addr = mm->mmap_base;
4146 - while (addr > len) {
4147 + if (mm->mmap_base < len)
4148 + addr = -ENOMEM;
4149 + else
4150 + addr = mm->mmap_base - len;
4151 +
4152 + while (!IS_ERR_VALUE(addr)) {
4153 /* Go down by chunk size */
4154 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4155 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4156
4157 /* Check for hit with different page size */
4158 mask = slice_range_to_mask(addr, len);
4159 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4160 * return with success:
4161 */
4162 vma = find_vma(mm, addr);
4163 - if (!vma || (addr + len) <= vma->vm_start) {
4164 + if (check_heap_stack_gap(vma, addr, len)) {
4165 /* remember the address as a hint for next time */
4166 if (use_cache)
4167 mm->free_area_cache = addr;
4168 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4169 mm->cached_hole_size = vma->vm_start - addr;
4170
4171 /* try just below the current vma->vm_start */
4172 - addr = vma->vm_start;
4173 + addr = skip_heap_stack_gap(vma, len);
4174 }
4175
4176 /*
4177 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4178 if (fixed && addr > (mm->task_size - len))
4179 return -EINVAL;
4180
4181 +#ifdef CONFIG_PAX_RANDMMAP
4182 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4183 + addr = 0;
4184 +#endif
4185 +
4186 /* If hint, make sure it matches our alignment restrictions */
4187 if (!fixed && addr) {
4188 addr = _ALIGN_UP(addr, 1ul << pshift);
4189 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4190 index 8517d2a..d2738d4 100644
4191 --- a/arch/s390/include/asm/atomic.h
4192 +++ b/arch/s390/include/asm/atomic.h
4193 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4194 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4195 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4196
4197 +#define atomic64_read_unchecked(v) atomic64_read(v)
4198 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4199 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4200 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4201 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4202 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4203 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4204 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4205 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4206 +
4207 #define smp_mb__before_atomic_dec() smp_mb()
4208 #define smp_mb__after_atomic_dec() smp_mb()
4209 #define smp_mb__before_atomic_inc() smp_mb()
4210 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4211 index 2a30d5a..5e5586f 100644
4212 --- a/arch/s390/include/asm/cache.h
4213 +++ b/arch/s390/include/asm/cache.h
4214 @@ -11,8 +11,10 @@
4215 #ifndef __ARCH_S390_CACHE_H
4216 #define __ARCH_S390_CACHE_H
4217
4218 -#define L1_CACHE_BYTES 256
4219 +#include <linux/const.h>
4220 +
4221 #define L1_CACHE_SHIFT 8
4222 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4223 #define NET_SKB_PAD 32
4224
4225 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4226 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4227 index 547f1a6..0b22b53 100644
4228 --- a/arch/s390/include/asm/elf.h
4229 +++ b/arch/s390/include/asm/elf.h
4230 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4231 the loader. We need to make sure that it is out of the way of the program
4232 that it will "exec", and that there is sufficient room for the brk. */
4233
4234 -extern unsigned long randomize_et_dyn(unsigned long base);
4235 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4236 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4237 +
4238 +#ifdef CONFIG_PAX_ASLR
4239 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4240 +
4241 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4242 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4243 +#endif
4244
4245 /* This yields a mask that user programs can use to figure out what
4246 instruction set this CPU supports. */
4247 @@ -211,7 +217,4 @@ struct linux_binprm;
4248 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4249 int arch_setup_additional_pages(struct linux_binprm *, int);
4250
4251 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4252 -#define arch_randomize_brk arch_randomize_brk
4253 -
4254 #endif
4255 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4256 index ef573c1..75a1ce6 100644
4257 --- a/arch/s390/include/asm/system.h
4258 +++ b/arch/s390/include/asm/system.h
4259 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
4260 extern void (*_machine_halt)(void);
4261 extern void (*_machine_power_off)(void);
4262
4263 -extern unsigned long arch_align_stack(unsigned long sp);
4264 +#define arch_align_stack(x) ((x) & ~0xfUL)
4265
4266 static inline int tprot(unsigned long addr)
4267 {
4268 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4269 index 2b23885..e136e31 100644
4270 --- a/arch/s390/include/asm/uaccess.h
4271 +++ b/arch/s390/include/asm/uaccess.h
4272 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
4273 copy_to_user(void __user *to, const void *from, unsigned long n)
4274 {
4275 might_fault();
4276 +
4277 + if ((long)n < 0)
4278 + return n;
4279 +
4280 if (access_ok(VERIFY_WRITE, to, n))
4281 n = __copy_to_user(to, from, n);
4282 return n;
4283 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4284 static inline unsigned long __must_check
4285 __copy_from_user(void *to, const void __user *from, unsigned long n)
4286 {
4287 + if ((long)n < 0)
4288 + return n;
4289 +
4290 if (__builtin_constant_p(n) && (n <= 256))
4291 return uaccess.copy_from_user_small(n, from, to);
4292 else
4293 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4294 unsigned int sz = __compiletime_object_size(to);
4295
4296 might_fault();
4297 +
4298 + if ((long)n < 0)
4299 + return n;
4300 +
4301 if (unlikely(sz != -1 && sz < n)) {
4302 copy_from_user_overflow();
4303 return n;
4304 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4305 index dfcb343..eda788a 100644
4306 --- a/arch/s390/kernel/module.c
4307 +++ b/arch/s390/kernel/module.c
4308 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4309
4310 /* Increase core size by size of got & plt and set start
4311 offsets for got and plt. */
4312 - me->core_size = ALIGN(me->core_size, 4);
4313 - me->arch.got_offset = me->core_size;
4314 - me->core_size += me->arch.got_size;
4315 - me->arch.plt_offset = me->core_size;
4316 - me->core_size += me->arch.plt_size;
4317 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4318 + me->arch.got_offset = me->core_size_rw;
4319 + me->core_size_rw += me->arch.got_size;
4320 + me->arch.plt_offset = me->core_size_rx;
4321 + me->core_size_rx += me->arch.plt_size;
4322 return 0;
4323 }
4324
4325 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4326 if (info->got_initialized == 0) {
4327 Elf_Addr *gotent;
4328
4329 - gotent = me->module_core + me->arch.got_offset +
4330 + gotent = me->module_core_rw + me->arch.got_offset +
4331 info->got_offset;
4332 *gotent = val;
4333 info->got_initialized = 1;
4334 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4335 else if (r_type == R_390_GOTENT ||
4336 r_type == R_390_GOTPLTENT)
4337 *(unsigned int *) loc =
4338 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4339 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4340 else if (r_type == R_390_GOT64 ||
4341 r_type == R_390_GOTPLT64)
4342 *(unsigned long *) loc = val;
4343 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4344 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4345 if (info->plt_initialized == 0) {
4346 unsigned int *ip;
4347 - ip = me->module_core + me->arch.plt_offset +
4348 + ip = me->module_core_rx + me->arch.plt_offset +
4349 info->plt_offset;
4350 #ifndef CONFIG_64BIT
4351 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4352 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4353 val - loc + 0xffffUL < 0x1ffffeUL) ||
4354 (r_type == R_390_PLT32DBL &&
4355 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4356 - val = (Elf_Addr) me->module_core +
4357 + val = (Elf_Addr) me->module_core_rx +
4358 me->arch.plt_offset +
4359 info->plt_offset;
4360 val += rela->r_addend - loc;
4361 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4362 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4363 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4364 val = val + rela->r_addend -
4365 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4366 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4367 if (r_type == R_390_GOTOFF16)
4368 *(unsigned short *) loc = val;
4369 else if (r_type == R_390_GOTOFF32)
4370 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4371 break;
4372 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4373 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4374 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4375 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4376 rela->r_addend - loc;
4377 if (r_type == R_390_GOTPC)
4378 *(unsigned int *) loc = val;
4379 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4380 index 53088e2..9f44a36 100644
4381 --- a/arch/s390/kernel/process.c
4382 +++ b/arch/s390/kernel/process.c
4383 @@ -320,39 +320,3 @@ unsigned long get_wchan(struct task_struct *p)
4384 }
4385 return 0;
4386 }
4387 -
4388 -unsigned long arch_align_stack(unsigned long sp)
4389 -{
4390 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4391 - sp -= get_random_int() & ~PAGE_MASK;
4392 - return sp & ~0xf;
4393 -}
4394 -
4395 -static inline unsigned long brk_rnd(void)
4396 -{
4397 - /* 8MB for 32bit, 1GB for 64bit */
4398 - if (is_32bit_task())
4399 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4400 - else
4401 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4402 -}
4403 -
4404 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4405 -{
4406 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4407 -
4408 - if (ret < mm->brk)
4409 - return mm->brk;
4410 - return ret;
4411 -}
4412 -
4413 -unsigned long randomize_et_dyn(unsigned long base)
4414 -{
4415 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4416 -
4417 - if (!(current->flags & PF_RANDOMIZE))
4418 - return base;
4419 - if (ret < base)
4420 - return base;
4421 - return ret;
4422 -}
4423 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4424 index a0155c0..34cc491 100644
4425 --- a/arch/s390/mm/mmap.c
4426 +++ b/arch/s390/mm/mmap.c
4427 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4428 */
4429 if (mmap_is_legacy()) {
4430 mm->mmap_base = TASK_UNMAPPED_BASE;
4431 +
4432 +#ifdef CONFIG_PAX_RANDMMAP
4433 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4434 + mm->mmap_base += mm->delta_mmap;
4435 +#endif
4436 +
4437 mm->get_unmapped_area = arch_get_unmapped_area;
4438 mm->unmap_area = arch_unmap_area;
4439 } else {
4440 mm->mmap_base = mmap_base();
4441 +
4442 +#ifdef CONFIG_PAX_RANDMMAP
4443 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4444 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4445 +#endif
4446 +
4447 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4448 mm->unmap_area = arch_unmap_area_topdown;
4449 }
4450 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4451 */
4452 if (mmap_is_legacy()) {
4453 mm->mmap_base = TASK_UNMAPPED_BASE;
4454 +
4455 +#ifdef CONFIG_PAX_RANDMMAP
4456 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4457 + mm->mmap_base += mm->delta_mmap;
4458 +#endif
4459 +
4460 mm->get_unmapped_area = s390_get_unmapped_area;
4461 mm->unmap_area = arch_unmap_area;
4462 } else {
4463 mm->mmap_base = mmap_base();
4464 +
4465 +#ifdef CONFIG_PAX_RANDMMAP
4466 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4467 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4468 +#endif
4469 +
4470 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4471 mm->unmap_area = arch_unmap_area_topdown;
4472 }
4473 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4474 index ae3d59f..f65f075 100644
4475 --- a/arch/score/include/asm/cache.h
4476 +++ b/arch/score/include/asm/cache.h
4477 @@ -1,7 +1,9 @@
4478 #ifndef _ASM_SCORE_CACHE_H
4479 #define _ASM_SCORE_CACHE_H
4480
4481 +#include <linux/const.h>
4482 +
4483 #define L1_CACHE_SHIFT 4
4484 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4485 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4486
4487 #endif /* _ASM_SCORE_CACHE_H */
4488 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4489 index 589d5c7..669e274 100644
4490 --- a/arch/score/include/asm/system.h
4491 +++ b/arch/score/include/asm/system.h
4492 @@ -17,7 +17,7 @@ do { \
4493 #define finish_arch_switch(prev) do {} while (0)
4494
4495 typedef void (*vi_handler_t)(void);
4496 -extern unsigned long arch_align_stack(unsigned long sp);
4497 +#define arch_align_stack(x) (x)
4498
4499 #define mb() barrier()
4500 #define rmb() barrier()
4501 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4502 index 25d0803..d6c8e36 100644
4503 --- a/arch/score/kernel/process.c
4504 +++ b/arch/score/kernel/process.c
4505 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4506
4507 return task_pt_regs(task)->cp0_epc;
4508 }
4509 -
4510 -unsigned long arch_align_stack(unsigned long sp)
4511 -{
4512 - return sp;
4513 -}
4514 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4515 index ef9e555..331bd29 100644
4516 --- a/arch/sh/include/asm/cache.h
4517 +++ b/arch/sh/include/asm/cache.h
4518 @@ -9,10 +9,11 @@
4519 #define __ASM_SH_CACHE_H
4520 #ifdef __KERNEL__
4521
4522 +#include <linux/const.h>
4523 #include <linux/init.h>
4524 #include <cpu/cache.h>
4525
4526 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4527 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4528
4529 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4530
4531 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4532 index afeb710..d1d1289 100644
4533 --- a/arch/sh/mm/mmap.c
4534 +++ b/arch/sh/mm/mmap.c
4535 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4536 addr = PAGE_ALIGN(addr);
4537
4538 vma = find_vma(mm, addr);
4539 - if (TASK_SIZE - len >= addr &&
4540 - (!vma || addr + len <= vma->vm_start))
4541 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4542 return addr;
4543 }
4544
4545 @@ -106,7 +105,7 @@ full_search:
4546 }
4547 return -ENOMEM;
4548 }
4549 - if (likely(!vma || addr + len <= vma->vm_start)) {
4550 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4551 /*
4552 * Remember the place where we stopped the search:
4553 */
4554 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4555 addr = PAGE_ALIGN(addr);
4556
4557 vma = find_vma(mm, addr);
4558 - if (TASK_SIZE - len >= addr &&
4559 - (!vma || addr + len <= vma->vm_start))
4560 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4561 return addr;
4562 }
4563
4564 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4565 /* make sure it can fit in the remaining address space */
4566 if (likely(addr > len)) {
4567 vma = find_vma(mm, addr-len);
4568 - if (!vma || addr <= vma->vm_start) {
4569 + if (check_heap_stack_gap(vma, addr - len, len)) {
4570 /* remember the address as a hint for next time */
4571 return (mm->free_area_cache = addr-len);
4572 }
4573 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4574 if (unlikely(mm->mmap_base < len))
4575 goto bottomup;
4576
4577 - addr = mm->mmap_base-len;
4578 - if (do_colour_align)
4579 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4580 + addr = mm->mmap_base - len;
4581
4582 do {
4583 + if (do_colour_align)
4584 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4585 /*
4586 * Lookup failure means no vma is above this address,
4587 * else if new region fits below vma->vm_start,
4588 * return with success:
4589 */
4590 vma = find_vma(mm, addr);
4591 - if (likely(!vma || addr+len <= vma->vm_start)) {
4592 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4593 /* remember the address as a hint for next time */
4594 return (mm->free_area_cache = addr);
4595 }
4596 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4597 mm->cached_hole_size = vma->vm_start - addr;
4598
4599 /* try just below the current vma->vm_start */
4600 - addr = vma->vm_start-len;
4601 - if (do_colour_align)
4602 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4603 - } while (likely(len < vma->vm_start));
4604 + addr = skip_heap_stack_gap(vma, len);
4605 + } while (!IS_ERR_VALUE(addr));
4606
4607 bottomup:
4608 /*
4609 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4610 index f92602e..27060b2 100644
4611 --- a/arch/sparc/Kconfig
4612 +++ b/arch/sparc/Kconfig
4613 @@ -31,6 +31,7 @@ config SPARC
4614
4615 config SPARC32
4616 def_bool !64BIT
4617 + select GENERIC_ATOMIC64
4618
4619 config SPARC64
4620 def_bool 64BIT
4621 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4622 index ad1fb5d..fc5315b 100644
4623 --- a/arch/sparc/Makefile
4624 +++ b/arch/sparc/Makefile
4625 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4626 # Export what is needed by arch/sparc/boot/Makefile
4627 export VMLINUX_INIT VMLINUX_MAIN
4628 VMLINUX_INIT := $(head-y) $(init-y)
4629 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4630 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4631 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4632 VMLINUX_MAIN += $(drivers-y) $(net-y)
4633
4634 diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
4635 index 5c3c8b6..ba822fa 100644
4636 --- a/arch/sparc/include/asm/atomic_32.h
4637 +++ b/arch/sparc/include/asm/atomic_32.h
4638 @@ -13,6 +13,8 @@
4639
4640 #include <linux/types.h>
4641
4642 +#include <asm-generic/atomic64.h>
4643 +
4644 #ifdef __KERNEL__
4645
4646 #include <asm/system.h>
4647 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4648 index 9f421df..b81fc12 100644
4649 --- a/arch/sparc/include/asm/atomic_64.h
4650 +++ b/arch/sparc/include/asm/atomic_64.h
4651 @@ -14,18 +14,40 @@
4652 #define ATOMIC64_INIT(i) { (i) }
4653
4654 #define atomic_read(v) (*(volatile int *)&(v)->counter)
4655 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4656 +{
4657 + return v->counter;
4658 +}
4659 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
4660 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4661 +{
4662 + return v->counter;
4663 +}
4664
4665 #define atomic_set(v, i) (((v)->counter) = i)
4666 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4667 +{
4668 + v->counter = i;
4669 +}
4670 #define atomic64_set(v, i) (((v)->counter) = i)
4671 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4672 +{
4673 + v->counter = i;
4674 +}
4675
4676 extern void atomic_add(int, atomic_t *);
4677 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4678 extern void atomic64_add(long, atomic64_t *);
4679 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4680 extern void atomic_sub(int, atomic_t *);
4681 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4682 extern void atomic64_sub(long, atomic64_t *);
4683 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4684
4685 extern int atomic_add_ret(int, atomic_t *);
4686 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4687 extern long atomic64_add_ret(long, atomic64_t *);
4688 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4689 extern int atomic_sub_ret(int, atomic_t *);
4690 extern long atomic64_sub_ret(long, atomic64_t *);
4691
4692 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4693 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4694
4695 #define atomic_inc_return(v) atomic_add_ret(1, v)
4696 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4697 +{
4698 + return atomic_add_ret_unchecked(1, v);
4699 +}
4700 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4701 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4702 +{
4703 + return atomic64_add_ret_unchecked(1, v);
4704 +}
4705
4706 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4707 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4708
4709 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4710 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4711 +{
4712 + return atomic_add_ret_unchecked(i, v);
4713 +}
4714 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4715 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4716 +{
4717 + return atomic64_add_ret_unchecked(i, v);
4718 +}
4719
4720 /*
4721 * atomic_inc_and_test - increment and test
4722 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4723 * other cases.
4724 */
4725 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4726 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4727 +{
4728 + return atomic_inc_return_unchecked(v) == 0;
4729 +}
4730 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4731
4732 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4733 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4734 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4735
4736 #define atomic_inc(v) atomic_add(1, v)
4737 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4738 +{
4739 + atomic_add_unchecked(1, v);
4740 +}
4741 #define atomic64_inc(v) atomic64_add(1, v)
4742 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4743 +{
4744 + atomic64_add_unchecked(1, v);
4745 +}
4746
4747 #define atomic_dec(v) atomic_sub(1, v)
4748 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4749 +{
4750 + atomic_sub_unchecked(1, v);
4751 +}
4752 #define atomic64_dec(v) atomic64_sub(1, v)
4753 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4754 +{
4755 + atomic64_sub_unchecked(1, v);
4756 +}
4757
4758 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4759 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4760
4761 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4762 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4763 +{
4764 + return cmpxchg(&v->counter, old, new);
4765 +}
4766 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4767 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4768 +{
4769 + return xchg(&v->counter, new);
4770 +}
4771
4772 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4773 {
4774 - int c, old;
4775 + int c, old, new;
4776 c = atomic_read(v);
4777 for (;;) {
4778 - if (unlikely(c == (u)))
4779 + if (unlikely(c == u))
4780 break;
4781 - old = atomic_cmpxchg((v), c, c + (a));
4782 +
4783 + asm volatile("addcc %2, %0, %0\n"
4784 +
4785 +#ifdef CONFIG_PAX_REFCOUNT
4786 + "tvs %%icc, 6\n"
4787 +#endif
4788 +
4789 + : "=r" (new)
4790 + : "0" (c), "ir" (a)
4791 + : "cc");
4792 +
4793 + old = atomic_cmpxchg(v, c, new);
4794 if (likely(old == c))
4795 break;
4796 c = old;
4797 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4798 #define atomic64_cmpxchg(v, o, n) \
4799 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4800 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4801 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4802 +{
4803 + return xchg(&v->counter, new);
4804 +}
4805
4806 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4807 {
4808 - long c, old;
4809 + long c, old, new;
4810 c = atomic64_read(v);
4811 for (;;) {
4812 - if (unlikely(c == (u)))
4813 + if (unlikely(c == u))
4814 break;
4815 - old = atomic64_cmpxchg((v), c, c + (a));
4816 +
4817 + asm volatile("addcc %2, %0, %0\n"
4818 +
4819 +#ifdef CONFIG_PAX_REFCOUNT
4820 + "tvs %%xcc, 6\n"
4821 +#endif
4822 +
4823 + : "=r" (new)
4824 + : "0" (c), "ir" (a)
4825 + : "cc");
4826 +
4827 + old = atomic64_cmpxchg(v, c, new);
4828 if (likely(old == c))
4829 break;
4830 c = old;
4831 }
4832 - return c != (u);
4833 + return c != u;
4834 }
4835
4836 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4837 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4838 index 69358b5..9d0d492 100644
4839 --- a/arch/sparc/include/asm/cache.h
4840 +++ b/arch/sparc/include/asm/cache.h
4841 @@ -7,10 +7,12 @@
4842 #ifndef _SPARC_CACHE_H
4843 #define _SPARC_CACHE_H
4844
4845 +#include <linux/const.h>
4846 +
4847 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
4848
4849 #define L1_CACHE_SHIFT 5
4850 -#define L1_CACHE_BYTES 32
4851 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4852
4853 #ifdef CONFIG_SPARC32
4854 #define SMP_CACHE_BYTES_SHIFT 5
4855 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4856 index 4269ca6..e3da77f 100644
4857 --- a/arch/sparc/include/asm/elf_32.h
4858 +++ b/arch/sparc/include/asm/elf_32.h
4859 @@ -114,6 +114,13 @@ typedef struct {
4860
4861 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4862
4863 +#ifdef CONFIG_PAX_ASLR
4864 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4865 +
4866 +#define PAX_DELTA_MMAP_LEN 16
4867 +#define PAX_DELTA_STACK_LEN 16
4868 +#endif
4869 +
4870 /* This yields a mask that user programs can use to figure out what
4871 instruction set this cpu supports. This can NOT be done in userspace
4872 on Sparc. */
4873 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4874 index 7df8b7f..4946269 100644
4875 --- a/arch/sparc/include/asm/elf_64.h
4876 +++ b/arch/sparc/include/asm/elf_64.h
4877 @@ -180,6 +180,13 @@ typedef struct {
4878 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4879 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4880
4881 +#ifdef CONFIG_PAX_ASLR
4882 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4883 +
4884 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4885 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4886 +#endif
4887 +
4888 extern unsigned long sparc64_elf_hwcap;
4889 #define ELF_HWCAP sparc64_elf_hwcap
4890
4891 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
4892 index 156707b..aefa786 100644
4893 --- a/arch/sparc/include/asm/page_32.h
4894 +++ b/arch/sparc/include/asm/page_32.h
4895 @@ -8,6 +8,8 @@
4896 #ifndef _SPARC_PAGE_H
4897 #define _SPARC_PAGE_H
4898
4899 +#include <linux/const.h>
4900 +
4901 #define PAGE_SHIFT 12
4902
4903 #ifndef __ASSEMBLY__
4904 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4905 index a790cc6..091ed94 100644
4906 --- a/arch/sparc/include/asm/pgtable_32.h
4907 +++ b/arch/sparc/include/asm/pgtable_32.h
4908 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4909 BTFIXUPDEF_INT(page_none)
4910 BTFIXUPDEF_INT(page_copy)
4911 BTFIXUPDEF_INT(page_readonly)
4912 +
4913 +#ifdef CONFIG_PAX_PAGEEXEC
4914 +BTFIXUPDEF_INT(page_shared_noexec)
4915 +BTFIXUPDEF_INT(page_copy_noexec)
4916 +BTFIXUPDEF_INT(page_readonly_noexec)
4917 +#endif
4918 +
4919 BTFIXUPDEF_INT(page_kernel)
4920
4921 #define PMD_SHIFT SUN4C_PMD_SHIFT
4922 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
4923 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4924 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4925
4926 +#ifdef CONFIG_PAX_PAGEEXEC
4927 +extern pgprot_t PAGE_SHARED_NOEXEC;
4928 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4929 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4930 +#else
4931 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4932 +# define PAGE_COPY_NOEXEC PAGE_COPY
4933 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4934 +#endif
4935 +
4936 extern unsigned long page_kernel;
4937
4938 #ifdef MODULE
4939 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4940 index f6ae2b2..b03ffc7 100644
4941 --- a/arch/sparc/include/asm/pgtsrmmu.h
4942 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4943 @@ -115,6 +115,13 @@
4944 SRMMU_EXEC | SRMMU_REF)
4945 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4946 SRMMU_EXEC | SRMMU_REF)
4947 +
4948 +#ifdef CONFIG_PAX_PAGEEXEC
4949 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4950 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4951 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4952 +#endif
4953 +
4954 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4955 SRMMU_DIRTY | SRMMU_REF)
4956
4957 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4958 index 9689176..63c18ea 100644
4959 --- a/arch/sparc/include/asm/spinlock_64.h
4960 +++ b/arch/sparc/include/asm/spinlock_64.h
4961 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
4962
4963 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4964
4965 -static void inline arch_read_lock(arch_rwlock_t *lock)
4966 +static inline void arch_read_lock(arch_rwlock_t *lock)
4967 {
4968 unsigned long tmp1, tmp2;
4969
4970 __asm__ __volatile__ (
4971 "1: ldsw [%2], %0\n"
4972 " brlz,pn %0, 2f\n"
4973 -"4: add %0, 1, %1\n"
4974 +"4: addcc %0, 1, %1\n"
4975 +
4976 +#ifdef CONFIG_PAX_REFCOUNT
4977 +" tvs %%icc, 6\n"
4978 +#endif
4979 +
4980 " cas [%2], %0, %1\n"
4981 " cmp %0, %1\n"
4982 " bne,pn %%icc, 1b\n"
4983 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
4984 " .previous"
4985 : "=&r" (tmp1), "=&r" (tmp2)
4986 : "r" (lock)
4987 - : "memory");
4988 + : "memory", "cc");
4989 }
4990
4991 -static int inline arch_read_trylock(arch_rwlock_t *lock)
4992 +static inline int arch_read_trylock(arch_rwlock_t *lock)
4993 {
4994 int tmp1, tmp2;
4995
4996 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
4997 "1: ldsw [%2], %0\n"
4998 " brlz,a,pn %0, 2f\n"
4999 " mov 0, %0\n"
5000 -" add %0, 1, %1\n"
5001 +" addcc %0, 1, %1\n"
5002 +
5003 +#ifdef CONFIG_PAX_REFCOUNT
5004 +" tvs %%icc, 6\n"
5005 +#endif
5006 +
5007 " cas [%2], %0, %1\n"
5008 " cmp %0, %1\n"
5009 " bne,pn %%icc, 1b\n"
5010 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5011 return tmp1;
5012 }
5013
5014 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5015 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5016 {
5017 unsigned long tmp1, tmp2;
5018
5019 __asm__ __volatile__(
5020 "1: lduw [%2], %0\n"
5021 -" sub %0, 1, %1\n"
5022 +" subcc %0, 1, %1\n"
5023 +
5024 +#ifdef CONFIG_PAX_REFCOUNT
5025 +" tvs %%icc, 6\n"
5026 +#endif
5027 +
5028 " cas [%2], %0, %1\n"
5029 " cmp %0, %1\n"
5030 " bne,pn %%xcc, 1b\n"
5031 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5032 : "memory");
5033 }
5034
5035 -static void inline arch_write_lock(arch_rwlock_t *lock)
5036 +static inline void arch_write_lock(arch_rwlock_t *lock)
5037 {
5038 unsigned long mask, tmp1, tmp2;
5039
5040 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5041 : "memory");
5042 }
5043
5044 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5045 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5046 {
5047 __asm__ __volatile__(
5048 " stw %%g0, [%0]"
5049 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5050 : "memory");
5051 }
5052
5053 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5054 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5055 {
5056 unsigned long mask, tmp1, tmp2, result;
5057
5058 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5059 index fa57532..e1a4c53 100644
5060 --- a/arch/sparc/include/asm/thread_info_32.h
5061 +++ b/arch/sparc/include/asm/thread_info_32.h
5062 @@ -50,6 +50,8 @@ struct thread_info {
5063 unsigned long w_saved;
5064
5065 struct restart_block restart_block;
5066 +
5067 + unsigned long lowest_stack;
5068 };
5069
5070 /*
5071 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5072 index 60d86be..952dea1 100644
5073 --- a/arch/sparc/include/asm/thread_info_64.h
5074 +++ b/arch/sparc/include/asm/thread_info_64.h
5075 @@ -63,6 +63,8 @@ struct thread_info {
5076 struct pt_regs *kern_una_regs;
5077 unsigned int kern_una_insn;
5078
5079 + unsigned long lowest_stack;
5080 +
5081 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5082 };
5083
5084 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5085 index e88fbe5..96b0ce5 100644
5086 --- a/arch/sparc/include/asm/uaccess.h
5087 +++ b/arch/sparc/include/asm/uaccess.h
5088 @@ -1,5 +1,13 @@
5089 #ifndef ___ASM_SPARC_UACCESS_H
5090 #define ___ASM_SPARC_UACCESS_H
5091 +
5092 +#ifdef __KERNEL__
5093 +#ifndef __ASSEMBLY__
5094 +#include <linux/types.h>
5095 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5096 +#endif
5097 +#endif
5098 +
5099 #if defined(__sparc__) && defined(__arch64__)
5100 #include <asm/uaccess_64.h>
5101 #else
5102 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5103 index 8303ac4..07f333d 100644
5104 --- a/arch/sparc/include/asm/uaccess_32.h
5105 +++ b/arch/sparc/include/asm/uaccess_32.h
5106 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5107
5108 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5109 {
5110 - if (n && __access_ok((unsigned long) to, n))
5111 + if ((long)n < 0)
5112 + return n;
5113 +
5114 + if (n && __access_ok((unsigned long) to, n)) {
5115 + if (!__builtin_constant_p(n))
5116 + check_object_size(from, n, true);
5117 return __copy_user(to, (__force void __user *) from, n);
5118 - else
5119 + } else
5120 return n;
5121 }
5122
5123 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5124 {
5125 + if ((long)n < 0)
5126 + return n;
5127 +
5128 + if (!__builtin_constant_p(n))
5129 + check_object_size(from, n, true);
5130 +
5131 return __copy_user(to, (__force void __user *) from, n);
5132 }
5133
5134 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5135 {
5136 - if (n && __access_ok((unsigned long) from, n))
5137 + if ((long)n < 0)
5138 + return n;
5139 +
5140 + if (n && __access_ok((unsigned long) from, n)) {
5141 + if (!__builtin_constant_p(n))
5142 + check_object_size(to, n, false);
5143 return __copy_user((__force void __user *) to, from, n);
5144 - else
5145 + } else
5146 return n;
5147 }
5148
5149 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5150 {
5151 + if ((long)n < 0)
5152 + return n;
5153 +
5154 return __copy_user((__force void __user *) to, from, n);
5155 }
5156
5157 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5158 index 3e1449f..5293a0e 100644
5159 --- a/arch/sparc/include/asm/uaccess_64.h
5160 +++ b/arch/sparc/include/asm/uaccess_64.h
5161 @@ -10,6 +10,7 @@
5162 #include <linux/compiler.h>
5163 #include <linux/string.h>
5164 #include <linux/thread_info.h>
5165 +#include <linux/kernel.h>
5166 #include <asm/asi.h>
5167 #include <asm/system.h>
5168 #include <asm/spitfire.h>
5169 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5170 static inline unsigned long __must_check
5171 copy_from_user(void *to, const void __user *from, unsigned long size)
5172 {
5173 - unsigned long ret = ___copy_from_user(to, from, size);
5174 + unsigned long ret;
5175
5176 + if ((long)size < 0 || size > INT_MAX)
5177 + return size;
5178 +
5179 + if (!__builtin_constant_p(size))
5180 + check_object_size(to, size, false);
5181 +
5182 + ret = ___copy_from_user(to, from, size);
5183 if (unlikely(ret))
5184 ret = copy_from_user_fixup(to, from, size);
5185
5186 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5187 static inline unsigned long __must_check
5188 copy_to_user(void __user *to, const void *from, unsigned long size)
5189 {
5190 - unsigned long ret = ___copy_to_user(to, from, size);
5191 + unsigned long ret;
5192
5193 + if ((long)size < 0 || size > INT_MAX)
5194 + return size;
5195 +
5196 + if (!__builtin_constant_p(size))
5197 + check_object_size(from, size, true);
5198 +
5199 + ret = ___copy_to_user(to, from, size);
5200 if (unlikely(ret))
5201 ret = copy_to_user_fixup(to, from, size);
5202 return ret;
5203 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5204 index cb85458..e063f17 100644
5205 --- a/arch/sparc/kernel/Makefile
5206 +++ b/arch/sparc/kernel/Makefile
5207 @@ -3,7 +3,7 @@
5208 #
5209
5210 asflags-y := -ansi
5211 -ccflags-y := -Werror
5212 +#ccflags-y := -Werror
5213
5214 extra-y := head_$(BITS).o
5215 extra-y += init_task.o
5216 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5217 index f793742..4d880af 100644
5218 --- a/arch/sparc/kernel/process_32.c
5219 +++ b/arch/sparc/kernel/process_32.c
5220 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5221 rw->ins[4], rw->ins[5],
5222 rw->ins[6],
5223 rw->ins[7]);
5224 - printk("%pS\n", (void *) rw->ins[7]);
5225 + printk("%pA\n", (void *) rw->ins[7]);
5226 rw = (struct reg_window32 *) rw->ins[6];
5227 }
5228 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5229 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5230
5231 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5232 r->psr, r->pc, r->npc, r->y, print_tainted());
5233 - printk("PC: <%pS>\n", (void *) r->pc);
5234 + printk("PC: <%pA>\n", (void *) r->pc);
5235 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5236 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5237 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5238 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5239 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5240 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5241 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5242 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5243
5244 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5245 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5246 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5247 rw = (struct reg_window32 *) fp;
5248 pc = rw->ins[7];
5249 printk("[%08lx : ", pc);
5250 - printk("%pS ] ", (void *) pc);
5251 + printk("%pA ] ", (void *) pc);
5252 fp = rw->ins[6];
5253 } while (++count < 16);
5254 printk("\n");
5255 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5256 index 3739a06..48b2ff0 100644
5257 --- a/arch/sparc/kernel/process_64.c
5258 +++ b/arch/sparc/kernel/process_64.c
5259 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5260 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5261 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5262 if (regs->tstate & TSTATE_PRIV)
5263 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5264 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5265 }
5266
5267 void show_regs(struct pt_regs *regs)
5268 {
5269 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5270 regs->tpc, regs->tnpc, regs->y, print_tainted());
5271 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5272 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5273 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5274 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5275 regs->u_regs[3]);
5276 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5277 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5278 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5279 regs->u_regs[15]);
5280 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5281 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5282 show_regwindow(regs);
5283 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5284 }
5285 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
5286 ((tp && tp->task) ? tp->task->pid : -1));
5287
5288 if (gp->tstate & TSTATE_PRIV) {
5289 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5290 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5291 (void *) gp->tpc,
5292 (void *) gp->o7,
5293 (void *) gp->i7,
5294 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5295 index 42b282f..28ce9f2 100644
5296 --- a/arch/sparc/kernel/sys_sparc_32.c
5297 +++ b/arch/sparc/kernel/sys_sparc_32.c
5298 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5299 if (ARCH_SUN4C && len > 0x20000000)
5300 return -ENOMEM;
5301 if (!addr)
5302 - addr = TASK_UNMAPPED_BASE;
5303 + addr = current->mm->mmap_base;
5304
5305 if (flags & MAP_SHARED)
5306 addr = COLOUR_ALIGN(addr);
5307 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5308 }
5309 if (TASK_SIZE - PAGE_SIZE - len < addr)
5310 return -ENOMEM;
5311 - if (!vmm || addr + len <= vmm->vm_start)
5312 + if (check_heap_stack_gap(vmm, addr, len))
5313 return addr;
5314 addr = vmm->vm_end;
5315 if (flags & MAP_SHARED)
5316 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5317 index 441521a..b767073 100644
5318 --- a/arch/sparc/kernel/sys_sparc_64.c
5319 +++ b/arch/sparc/kernel/sys_sparc_64.c
5320 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5321 /* We do not accept a shared mapping if it would violate
5322 * cache aliasing constraints.
5323 */
5324 - if ((flags & MAP_SHARED) &&
5325 + if ((filp || (flags & MAP_SHARED)) &&
5326 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5327 return -EINVAL;
5328 return addr;
5329 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5330 if (filp || (flags & MAP_SHARED))
5331 do_color_align = 1;
5332
5333 +#ifdef CONFIG_PAX_RANDMMAP
5334 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5335 +#endif
5336 +
5337 if (addr) {
5338 if (do_color_align)
5339 addr = COLOUR_ALIGN(addr, pgoff);
5340 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5341 addr = PAGE_ALIGN(addr);
5342
5343 vma = find_vma(mm, addr);
5344 - if (task_size - len >= addr &&
5345 - (!vma || addr + len <= vma->vm_start))
5346 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5347 return addr;
5348 }
5349
5350 if (len > mm->cached_hole_size) {
5351 - start_addr = addr = mm->free_area_cache;
5352 + start_addr = addr = mm->free_area_cache;
5353 } else {
5354 - start_addr = addr = TASK_UNMAPPED_BASE;
5355 + start_addr = addr = mm->mmap_base;
5356 mm->cached_hole_size = 0;
5357 }
5358
5359 @@ -174,14 +177,14 @@ full_search:
5360 vma = find_vma(mm, VA_EXCLUDE_END);
5361 }
5362 if (unlikely(task_size < addr)) {
5363 - if (start_addr != TASK_UNMAPPED_BASE) {
5364 - start_addr = addr = TASK_UNMAPPED_BASE;
5365 + if (start_addr != mm->mmap_base) {
5366 + start_addr = addr = mm->mmap_base;
5367 mm->cached_hole_size = 0;
5368 goto full_search;
5369 }
5370 return -ENOMEM;
5371 }
5372 - if (likely(!vma || addr + len <= vma->vm_start)) {
5373 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5374 /*
5375 * Remember the place where we stopped the search:
5376 */
5377 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5378 /* We do not accept a shared mapping if it would violate
5379 * cache aliasing constraints.
5380 */
5381 - if ((flags & MAP_SHARED) &&
5382 + if ((filp || (flags & MAP_SHARED)) &&
5383 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5384 return -EINVAL;
5385 return addr;
5386 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5387 addr = PAGE_ALIGN(addr);
5388
5389 vma = find_vma(mm, addr);
5390 - if (task_size - len >= addr &&
5391 - (!vma || addr + len <= vma->vm_start))
5392 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5393 return addr;
5394 }
5395
5396 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5397 /* make sure it can fit in the remaining address space */
5398 if (likely(addr > len)) {
5399 vma = find_vma(mm, addr-len);
5400 - if (!vma || addr <= vma->vm_start) {
5401 + if (check_heap_stack_gap(vma, addr - len, len)) {
5402 /* remember the address as a hint for next time */
5403 return (mm->free_area_cache = addr-len);
5404 }
5405 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5406 if (unlikely(mm->mmap_base < len))
5407 goto bottomup;
5408
5409 - addr = mm->mmap_base-len;
5410 - if (do_color_align)
5411 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5412 + addr = mm->mmap_base - len;
5413
5414 do {
5415 + if (do_color_align)
5416 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5417 /*
5418 * Lookup failure means no vma is above this address,
5419 * else if new region fits below vma->vm_start,
5420 * return with success:
5421 */
5422 vma = find_vma(mm, addr);
5423 - if (likely(!vma || addr+len <= vma->vm_start)) {
5424 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5425 /* remember the address as a hint for next time */
5426 return (mm->free_area_cache = addr);
5427 }
5428 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5429 mm->cached_hole_size = vma->vm_start - addr;
5430
5431 /* try just below the current vma->vm_start */
5432 - addr = vma->vm_start-len;
5433 - if (do_color_align)
5434 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5435 - } while (likely(len < vma->vm_start));
5436 + addr = skip_heap_stack_gap(vma, len);
5437 + } while (!IS_ERR_VALUE(addr));
5438
5439 bottomup:
5440 /*
5441 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5442 gap == RLIM_INFINITY ||
5443 sysctl_legacy_va_layout) {
5444 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5445 +
5446 +#ifdef CONFIG_PAX_RANDMMAP
5447 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5448 + mm->mmap_base += mm->delta_mmap;
5449 +#endif
5450 +
5451 mm->get_unmapped_area = arch_get_unmapped_area;
5452 mm->unmap_area = arch_unmap_area;
5453 } else {
5454 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5455 gap = (task_size / 6 * 5);
5456
5457 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5458 +
5459 +#ifdef CONFIG_PAX_RANDMMAP
5460 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5461 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5462 +#endif
5463 +
5464 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5465 mm->unmap_area = arch_unmap_area_topdown;
5466 }
5467 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5468 index 591f20c..0f1b925 100644
5469 --- a/arch/sparc/kernel/traps_32.c
5470 +++ b/arch/sparc/kernel/traps_32.c
5471 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
5472 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5473 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5474
5475 +extern void gr_handle_kernel_exploit(void);
5476 +
5477 void die_if_kernel(char *str, struct pt_regs *regs)
5478 {
5479 static int die_counter;
5480 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5481 count++ < 30 &&
5482 (((unsigned long) rw) >= PAGE_OFFSET) &&
5483 !(((unsigned long) rw) & 0x7)) {
5484 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5485 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5486 (void *) rw->ins[7]);
5487 rw = (struct reg_window32 *)rw->ins[6];
5488 }
5489 }
5490 printk("Instruction DUMP:");
5491 instruction_dump ((unsigned long *) regs->pc);
5492 - if(regs->psr & PSR_PS)
5493 + if(regs->psr & PSR_PS) {
5494 + gr_handle_kernel_exploit();
5495 do_exit(SIGKILL);
5496 + }
5497 do_exit(SIGSEGV);
5498 }
5499
5500 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5501 index 0cbdaa4..438e4c9 100644
5502 --- a/arch/sparc/kernel/traps_64.c
5503 +++ b/arch/sparc/kernel/traps_64.c
5504 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5505 i + 1,
5506 p->trapstack[i].tstate, p->trapstack[i].tpc,
5507 p->trapstack[i].tnpc, p->trapstack[i].tt);
5508 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5509 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5510 }
5511 }
5512
5513 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5514
5515 lvl -= 0x100;
5516 if (regs->tstate & TSTATE_PRIV) {
5517 +
5518 +#ifdef CONFIG_PAX_REFCOUNT
5519 + if (lvl == 6)
5520 + pax_report_refcount_overflow(regs);
5521 +#endif
5522 +
5523 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5524 die_if_kernel(buffer, regs);
5525 }
5526 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5527 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5528 {
5529 char buffer[32];
5530 -
5531 +
5532 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5533 0, lvl, SIGTRAP) == NOTIFY_STOP)
5534 return;
5535
5536 +#ifdef CONFIG_PAX_REFCOUNT
5537 + if (lvl == 6)
5538 + pax_report_refcount_overflow(regs);
5539 +#endif
5540 +
5541 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5542
5543 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5544 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5545 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5546 printk("%s" "ERROR(%d): ",
5547 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5548 - printk("TPC<%pS>\n", (void *) regs->tpc);
5549 + printk("TPC<%pA>\n", (void *) regs->tpc);
5550 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5551 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5552 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5553 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5554 smp_processor_id(),
5555 (type & 0x1) ? 'I' : 'D',
5556 regs->tpc);
5557 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5558 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5559 panic("Irrecoverable Cheetah+ parity error.");
5560 }
5561
5562 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5563 smp_processor_id(),
5564 (type & 0x1) ? 'I' : 'D',
5565 regs->tpc);
5566 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5567 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5568 }
5569
5570 struct sun4v_error_entry {
5571 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5572
5573 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5574 regs->tpc, tl);
5575 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5576 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5577 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5578 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5579 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5580 (void *) regs->u_regs[UREG_I7]);
5581 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5582 "pte[%lx] error[%lx]\n",
5583 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5584
5585 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5586 regs->tpc, tl);
5587 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5588 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5589 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5590 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5591 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5592 (void *) regs->u_regs[UREG_I7]);
5593 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5594 "pte[%lx] error[%lx]\n",
5595 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5596 fp = (unsigned long)sf->fp + STACK_BIAS;
5597 }
5598
5599 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5600 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5601 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5602 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
5603 int index = tsk->curr_ret_stack;
5604 if (tsk->ret_stack && index >= graph) {
5605 pc = tsk->ret_stack[index - graph].ret;
5606 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5607 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5608 graph++;
5609 }
5610 }
5611 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5612 return (struct reg_window *) (fp + STACK_BIAS);
5613 }
5614
5615 +extern void gr_handle_kernel_exploit(void);
5616 +
5617 void die_if_kernel(char *str, struct pt_regs *regs)
5618 {
5619 static int die_counter;
5620 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5621 while (rw &&
5622 count++ < 30 &&
5623 kstack_valid(tp, (unsigned long) rw)) {
5624 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5625 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5626 (void *) rw->ins[7]);
5627
5628 rw = kernel_stack_up(rw);
5629 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5630 }
5631 user_instruction_dump ((unsigned int __user *) regs->tpc);
5632 }
5633 - if (regs->tstate & TSTATE_PRIV)
5634 + if (regs->tstate & TSTATE_PRIV) {
5635 + gr_handle_kernel_exploit();
5636 do_exit(SIGKILL);
5637 + }
5638 do_exit(SIGSEGV);
5639 }
5640 EXPORT_SYMBOL(die_if_kernel);
5641 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5642 index 76e4ac1..78f8bb1 100644
5643 --- a/arch/sparc/kernel/unaligned_64.c
5644 +++ b/arch/sparc/kernel/unaligned_64.c
5645 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
5646 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
5647
5648 if (__ratelimit(&ratelimit)) {
5649 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5650 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5651 regs->tpc, (void *) regs->tpc);
5652 }
5653 }
5654 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5655 index a3fc437..fea9957 100644
5656 --- a/arch/sparc/lib/Makefile
5657 +++ b/arch/sparc/lib/Makefile
5658 @@ -2,7 +2,7 @@
5659 #
5660
5661 asflags-y := -ansi -DST_DIV0=0x02
5662 -ccflags-y := -Werror
5663 +#ccflags-y := -Werror
5664
5665 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5666 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5667 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5668 index 59186e0..f747d7a 100644
5669 --- a/arch/sparc/lib/atomic_64.S
5670 +++ b/arch/sparc/lib/atomic_64.S
5671 @@ -18,7 +18,12 @@
5672 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5673 BACKOFF_SETUP(%o2)
5674 1: lduw [%o1], %g1
5675 - add %g1, %o0, %g7
5676 + addcc %g1, %o0, %g7
5677 +
5678 +#ifdef CONFIG_PAX_REFCOUNT
5679 + tvs %icc, 6
5680 +#endif
5681 +
5682 cas [%o1], %g1, %g7
5683 cmp %g1, %g7
5684 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5685 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5686 2: BACKOFF_SPIN(%o2, %o3, 1b)
5687 .size atomic_add, .-atomic_add
5688
5689 + .globl atomic_add_unchecked
5690 + .type atomic_add_unchecked,#function
5691 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5692 + BACKOFF_SETUP(%o2)
5693 +1: lduw [%o1], %g1
5694 + add %g1, %o0, %g7
5695 + cas [%o1], %g1, %g7
5696 + cmp %g1, %g7
5697 + bne,pn %icc, 2f
5698 + nop
5699 + retl
5700 + nop
5701 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5702 + .size atomic_add_unchecked, .-atomic_add_unchecked
5703 +
5704 .globl atomic_sub
5705 .type atomic_sub,#function
5706 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5707 BACKOFF_SETUP(%o2)
5708 1: lduw [%o1], %g1
5709 - sub %g1, %o0, %g7
5710 + subcc %g1, %o0, %g7
5711 +
5712 +#ifdef CONFIG_PAX_REFCOUNT
5713 + tvs %icc, 6
5714 +#endif
5715 +
5716 cas [%o1], %g1, %g7
5717 cmp %g1, %g7
5718 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5719 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5720 2: BACKOFF_SPIN(%o2, %o3, 1b)
5721 .size atomic_sub, .-atomic_sub
5722
5723 + .globl atomic_sub_unchecked
5724 + .type atomic_sub_unchecked,#function
5725 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5726 + BACKOFF_SETUP(%o2)
5727 +1: lduw [%o1], %g1
5728 + sub %g1, %o0, %g7
5729 + cas [%o1], %g1, %g7
5730 + cmp %g1, %g7
5731 + bne,pn %icc, 2f
5732 + nop
5733 + retl
5734 + nop
5735 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5736 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5737 +
5738 .globl atomic_add_ret
5739 .type atomic_add_ret,#function
5740 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5741 BACKOFF_SETUP(%o2)
5742 1: lduw [%o1], %g1
5743 - add %g1, %o0, %g7
5744 + addcc %g1, %o0, %g7
5745 +
5746 +#ifdef CONFIG_PAX_REFCOUNT
5747 + tvs %icc, 6
5748 +#endif
5749 +
5750 cas [%o1], %g1, %g7
5751 cmp %g1, %g7
5752 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5753 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5754 2: BACKOFF_SPIN(%o2, %o3, 1b)
5755 .size atomic_add_ret, .-atomic_add_ret
5756
5757 + .globl atomic_add_ret_unchecked
5758 + .type atomic_add_ret_unchecked,#function
5759 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5760 + BACKOFF_SETUP(%o2)
5761 +1: lduw [%o1], %g1
5762 + addcc %g1, %o0, %g7
5763 + cas [%o1], %g1, %g7
5764 + cmp %g1, %g7
5765 + bne,pn %icc, 2f
5766 + add %g7, %o0, %g7
5767 + sra %g7, 0, %o0
5768 + retl
5769 + nop
5770 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5771 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5772 +
5773 .globl atomic_sub_ret
5774 .type atomic_sub_ret,#function
5775 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5776 BACKOFF_SETUP(%o2)
5777 1: lduw [%o1], %g1
5778 - sub %g1, %o0, %g7
5779 + subcc %g1, %o0, %g7
5780 +
5781 +#ifdef CONFIG_PAX_REFCOUNT
5782 + tvs %icc, 6
5783 +#endif
5784 +
5785 cas [%o1], %g1, %g7
5786 cmp %g1, %g7
5787 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5788 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5789 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5790 BACKOFF_SETUP(%o2)
5791 1: ldx [%o1], %g1
5792 - add %g1, %o0, %g7
5793 + addcc %g1, %o0, %g7
5794 +
5795 +#ifdef CONFIG_PAX_REFCOUNT
5796 + tvs %xcc, 6
5797 +#endif
5798 +
5799 casx [%o1], %g1, %g7
5800 cmp %g1, %g7
5801 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5802 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5803 2: BACKOFF_SPIN(%o2, %o3, 1b)
5804 .size atomic64_add, .-atomic64_add
5805
5806 + .globl atomic64_add_unchecked
5807 + .type atomic64_add_unchecked,#function
5808 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5809 + BACKOFF_SETUP(%o2)
5810 +1: ldx [%o1], %g1
5811 + addcc %g1, %o0, %g7
5812 + casx [%o1], %g1, %g7
5813 + cmp %g1, %g7
5814 + bne,pn %xcc, 2f
5815 + nop
5816 + retl
5817 + nop
5818 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5819 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5820 +
5821 .globl atomic64_sub
5822 .type atomic64_sub,#function
5823 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5824 BACKOFF_SETUP(%o2)
5825 1: ldx [%o1], %g1
5826 - sub %g1, %o0, %g7
5827 + subcc %g1, %o0, %g7
5828 +
5829 +#ifdef CONFIG_PAX_REFCOUNT
5830 + tvs %xcc, 6
5831 +#endif
5832 +
5833 casx [%o1], %g1, %g7
5834 cmp %g1, %g7
5835 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5836 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5837 2: BACKOFF_SPIN(%o2, %o3, 1b)
5838 .size atomic64_sub, .-atomic64_sub
5839
5840 + .globl atomic64_sub_unchecked
5841 + .type atomic64_sub_unchecked,#function
5842 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5843 + BACKOFF_SETUP(%o2)
5844 +1: ldx [%o1], %g1
5845 + subcc %g1, %o0, %g7
5846 + casx [%o1], %g1, %g7
5847 + cmp %g1, %g7
5848 + bne,pn %xcc, 2f
5849 + nop
5850 + retl
5851 + nop
5852 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5853 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5854 +
5855 .globl atomic64_add_ret
5856 .type atomic64_add_ret,#function
5857 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5858 BACKOFF_SETUP(%o2)
5859 1: ldx [%o1], %g1
5860 - add %g1, %o0, %g7
5861 + addcc %g1, %o0, %g7
5862 +
5863 +#ifdef CONFIG_PAX_REFCOUNT
5864 + tvs %xcc, 6
5865 +#endif
5866 +
5867 casx [%o1], %g1, %g7
5868 cmp %g1, %g7
5869 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5870 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5871 2: BACKOFF_SPIN(%o2, %o3, 1b)
5872 .size atomic64_add_ret, .-atomic64_add_ret
5873
5874 + .globl atomic64_add_ret_unchecked
5875 + .type atomic64_add_ret_unchecked,#function
5876 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5877 + BACKOFF_SETUP(%o2)
5878 +1: ldx [%o1], %g1
5879 + addcc %g1, %o0, %g7
5880 + casx [%o1], %g1, %g7
5881 + cmp %g1, %g7
5882 + bne,pn %xcc, 2f
5883 + add %g7, %o0, %g7
5884 + mov %g7, %o0
5885 + retl
5886 + nop
5887 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5888 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5889 +
5890 .globl atomic64_sub_ret
5891 .type atomic64_sub_ret,#function
5892 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5893 BACKOFF_SETUP(%o2)
5894 1: ldx [%o1], %g1
5895 - sub %g1, %o0, %g7
5896 + subcc %g1, %o0, %g7
5897 +
5898 +#ifdef CONFIG_PAX_REFCOUNT
5899 + tvs %xcc, 6
5900 +#endif
5901 +
5902 casx [%o1], %g1, %g7
5903 cmp %g1, %g7
5904 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5905 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5906 index 1b30bb3..b4a16c7 100644
5907 --- a/arch/sparc/lib/ksyms.c
5908 +++ b/arch/sparc/lib/ksyms.c
5909 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
5910
5911 /* Atomic counter implementation. */
5912 EXPORT_SYMBOL(atomic_add);
5913 +EXPORT_SYMBOL(atomic_add_unchecked);
5914 EXPORT_SYMBOL(atomic_add_ret);
5915 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5916 EXPORT_SYMBOL(atomic_sub);
5917 +EXPORT_SYMBOL(atomic_sub_unchecked);
5918 EXPORT_SYMBOL(atomic_sub_ret);
5919 EXPORT_SYMBOL(atomic64_add);
5920 +EXPORT_SYMBOL(atomic64_add_unchecked);
5921 EXPORT_SYMBOL(atomic64_add_ret);
5922 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5923 EXPORT_SYMBOL(atomic64_sub);
5924 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5925 EXPORT_SYMBOL(atomic64_sub_ret);
5926
5927 /* Atomic bit operations. */
5928 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5929 index 301421c..e2535d1 100644
5930 --- a/arch/sparc/mm/Makefile
5931 +++ b/arch/sparc/mm/Makefile
5932 @@ -2,7 +2,7 @@
5933 #
5934
5935 asflags-y := -ansi
5936 -ccflags-y := -Werror
5937 +#ccflags-y := -Werror
5938
5939 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
5940 obj-y += fault_$(BITS).o
5941 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5942 index 8023fd7..c8e89e9 100644
5943 --- a/arch/sparc/mm/fault_32.c
5944 +++ b/arch/sparc/mm/fault_32.c
5945 @@ -21,6 +21,9 @@
5946 #include <linux/perf_event.h>
5947 #include <linux/interrupt.h>
5948 #include <linux/kdebug.h>
5949 +#include <linux/slab.h>
5950 +#include <linux/pagemap.h>
5951 +#include <linux/compiler.h>
5952
5953 #include <asm/system.h>
5954 #include <asm/page.h>
5955 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5956 return safe_compute_effective_address(regs, insn);
5957 }
5958
5959 +#ifdef CONFIG_PAX_PAGEEXEC
5960 +#ifdef CONFIG_PAX_DLRESOLVE
5961 +static void pax_emuplt_close(struct vm_area_struct *vma)
5962 +{
5963 + vma->vm_mm->call_dl_resolve = 0UL;
5964 +}
5965 +
5966 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5967 +{
5968 + unsigned int *kaddr;
5969 +
5970 + vmf->page = alloc_page(GFP_HIGHUSER);
5971 + if (!vmf->page)
5972 + return VM_FAULT_OOM;
5973 +
5974 + kaddr = kmap(vmf->page);
5975 + memset(kaddr, 0, PAGE_SIZE);
5976 + kaddr[0] = 0x9DE3BFA8U; /* save */
5977 + flush_dcache_page(vmf->page);
5978 + kunmap(vmf->page);
5979 + return VM_FAULT_MAJOR;
5980 +}
5981 +
5982 +static const struct vm_operations_struct pax_vm_ops = {
5983 + .close = pax_emuplt_close,
5984 + .fault = pax_emuplt_fault
5985 +};
5986 +
5987 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5988 +{
5989 + int ret;
5990 +
5991 + INIT_LIST_HEAD(&vma->anon_vma_chain);
5992 + vma->vm_mm = current->mm;
5993 + vma->vm_start = addr;
5994 + vma->vm_end = addr + PAGE_SIZE;
5995 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5996 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5997 + vma->vm_ops = &pax_vm_ops;
5998 +
5999 + ret = insert_vm_struct(current->mm, vma);
6000 + if (ret)
6001 + return ret;
6002 +
6003 + ++current->mm->total_vm;
6004 + return 0;
6005 +}
6006 +#endif
6007 +
6008 +/*
6009 + * PaX: decide what to do with offenders (regs->pc = fault address)
6010 + *
6011 + * returns 1 when task should be killed
6012 + * 2 when patched PLT trampoline was detected
6013 + * 3 when unpatched PLT trampoline was detected
6014 + */
6015 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6016 +{
6017 +
6018 +#ifdef CONFIG_PAX_EMUPLT
6019 + int err;
6020 +
6021 + do { /* PaX: patched PLT emulation #1 */
6022 + unsigned int sethi1, sethi2, jmpl;
6023 +
6024 + err = get_user(sethi1, (unsigned int *)regs->pc);
6025 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6026 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6027 +
6028 + if (err)
6029 + break;
6030 +
6031 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6032 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6033 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6034 + {
6035 + unsigned int addr;
6036 +
6037 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6038 + addr = regs->u_regs[UREG_G1];
6039 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6040 + regs->pc = addr;
6041 + regs->npc = addr+4;
6042 + return 2;
6043 + }
6044 + } while (0);
6045 +
6046 + { /* PaX: patched PLT emulation #2 */
6047 + unsigned int ba;
6048 +
6049 + err = get_user(ba, (unsigned int *)regs->pc);
6050 +
6051 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6052 + unsigned int addr;
6053 +
6054 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6055 + regs->pc = addr;
6056 + regs->npc = addr+4;
6057 + return 2;
6058 + }
6059 + }
6060 +
6061 + do { /* PaX: patched PLT emulation #3 */
6062 + unsigned int sethi, jmpl, nop;
6063 +
6064 + err = get_user(sethi, (unsigned int *)regs->pc);
6065 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6066 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6067 +
6068 + if (err)
6069 + break;
6070 +
6071 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6072 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6073 + nop == 0x01000000U)
6074 + {
6075 + unsigned int addr;
6076 +
6077 + addr = (sethi & 0x003FFFFFU) << 10;
6078 + regs->u_regs[UREG_G1] = addr;
6079 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6080 + regs->pc = addr;
6081 + regs->npc = addr+4;
6082 + return 2;
6083 + }
6084 + } while (0);
6085 +
6086 + do { /* PaX: unpatched PLT emulation step 1 */
6087 + unsigned int sethi, ba, nop;
6088 +
6089 + err = get_user(sethi, (unsigned int *)regs->pc);
6090 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6091 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6092 +
6093 + if (err)
6094 + break;
6095 +
6096 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6097 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6098 + nop == 0x01000000U)
6099 + {
6100 + unsigned int addr, save, call;
6101 +
6102 + if ((ba & 0xFFC00000U) == 0x30800000U)
6103 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6104 + else
6105 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6106 +
6107 + err = get_user(save, (unsigned int *)addr);
6108 + err |= get_user(call, (unsigned int *)(addr+4));
6109 + err |= get_user(nop, (unsigned int *)(addr+8));
6110 + if (err)
6111 + break;
6112 +
6113 +#ifdef CONFIG_PAX_DLRESOLVE
6114 + if (save == 0x9DE3BFA8U &&
6115 + (call & 0xC0000000U) == 0x40000000U &&
6116 + nop == 0x01000000U)
6117 + {
6118 + struct vm_area_struct *vma;
6119 + unsigned long call_dl_resolve;
6120 +
6121 + down_read(&current->mm->mmap_sem);
6122 + call_dl_resolve = current->mm->call_dl_resolve;
6123 + up_read(&current->mm->mmap_sem);
6124 + if (likely(call_dl_resolve))
6125 + goto emulate;
6126 +
6127 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6128 +
6129 + down_write(&current->mm->mmap_sem);
6130 + if (current->mm->call_dl_resolve) {
6131 + call_dl_resolve = current->mm->call_dl_resolve;
6132 + up_write(&current->mm->mmap_sem);
6133 + if (vma)
6134 + kmem_cache_free(vm_area_cachep, vma);
6135 + goto emulate;
6136 + }
6137 +
6138 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6139 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6140 + up_write(&current->mm->mmap_sem);
6141 + if (vma)
6142 + kmem_cache_free(vm_area_cachep, vma);
6143 + return 1;
6144 + }
6145 +
6146 + if (pax_insert_vma(vma, call_dl_resolve)) {
6147 + up_write(&current->mm->mmap_sem);
6148 + kmem_cache_free(vm_area_cachep, vma);
6149 + return 1;
6150 + }
6151 +
6152 + current->mm->call_dl_resolve = call_dl_resolve;
6153 + up_write(&current->mm->mmap_sem);
6154 +
6155 +emulate:
6156 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6157 + regs->pc = call_dl_resolve;
6158 + regs->npc = addr+4;
6159 + return 3;
6160 + }
6161 +#endif
6162 +
6163 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6164 + if ((save & 0xFFC00000U) == 0x05000000U &&
6165 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6166 + nop == 0x01000000U)
6167 + {
6168 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6169 + regs->u_regs[UREG_G2] = addr + 4;
6170 + addr = (save & 0x003FFFFFU) << 10;
6171 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6172 + regs->pc = addr;
6173 + regs->npc = addr+4;
6174 + return 3;
6175 + }
6176 + }
6177 + } while (0);
6178 +
6179 + do { /* PaX: unpatched PLT emulation step 2 */
6180 + unsigned int save, call, nop;
6181 +
6182 + err = get_user(save, (unsigned int *)(regs->pc-4));
6183 + err |= get_user(call, (unsigned int *)regs->pc);
6184 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6185 + if (err)
6186 + break;
6187 +
6188 + if (save == 0x9DE3BFA8U &&
6189 + (call & 0xC0000000U) == 0x40000000U &&
6190 + nop == 0x01000000U)
6191 + {
6192 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6193 +
6194 + regs->u_regs[UREG_RETPC] = regs->pc;
6195 + regs->pc = dl_resolve;
6196 + regs->npc = dl_resolve+4;
6197 + return 3;
6198 + }
6199 + } while (0);
6200 +#endif
6201 +
6202 + return 1;
6203 +}
6204 +
6205 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6206 +{
6207 + unsigned long i;
6208 +
6209 + printk(KERN_ERR "PAX: bytes at PC: ");
6210 + for (i = 0; i < 8; i++) {
6211 + unsigned int c;
6212 + if (get_user(c, (unsigned int *)pc+i))
6213 + printk(KERN_CONT "???????? ");
6214 + else
6215 + printk(KERN_CONT "%08x ", c);
6216 + }
6217 + printk("\n");
6218 +}
6219 +#endif
6220 +
6221 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6222 int text_fault)
6223 {
6224 @@ -280,6 +545,24 @@ good_area:
6225 if(!(vma->vm_flags & VM_WRITE))
6226 goto bad_area;
6227 } else {
6228 +
6229 +#ifdef CONFIG_PAX_PAGEEXEC
6230 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6231 + up_read(&mm->mmap_sem);
6232 + switch (pax_handle_fetch_fault(regs)) {
6233 +
6234 +#ifdef CONFIG_PAX_EMUPLT
6235 + case 2:
6236 + case 3:
6237 + return;
6238 +#endif
6239 +
6240 + }
6241 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6242 + do_group_exit(SIGKILL);
6243 + }
6244 +#endif
6245 +
6246 /* Allow reads even for write-only mappings */
6247 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6248 goto bad_area;
6249 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6250 index 504c062..6fcb9c6 100644
6251 --- a/arch/sparc/mm/fault_64.c
6252 +++ b/arch/sparc/mm/fault_64.c
6253 @@ -21,6 +21,9 @@
6254 #include <linux/kprobes.h>
6255 #include <linux/kdebug.h>
6256 #include <linux/percpu.h>
6257 +#include <linux/slab.h>
6258 +#include <linux/pagemap.h>
6259 +#include <linux/compiler.h>
6260
6261 #include <asm/page.h>
6262 #include <asm/pgtable.h>
6263 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6264 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6265 regs->tpc);
6266 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6267 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6268 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6269 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6270 dump_stack();
6271 unhandled_fault(regs->tpc, current, regs);
6272 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6273 show_regs(regs);
6274 }
6275
6276 +#ifdef CONFIG_PAX_PAGEEXEC
6277 +#ifdef CONFIG_PAX_DLRESOLVE
6278 +static void pax_emuplt_close(struct vm_area_struct *vma)
6279 +{
6280 + vma->vm_mm->call_dl_resolve = 0UL;
6281 +}
6282 +
6283 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6284 +{
6285 + unsigned int *kaddr;
6286 +
6287 + vmf->page = alloc_page(GFP_HIGHUSER);
6288 + if (!vmf->page)
6289 + return VM_FAULT_OOM;
6290 +
6291 + kaddr = kmap(vmf->page);
6292 + memset(kaddr, 0, PAGE_SIZE);
6293 + kaddr[0] = 0x9DE3BFA8U; /* save */
6294 + flush_dcache_page(vmf->page);
6295 + kunmap(vmf->page);
6296 + return VM_FAULT_MAJOR;
6297 +}
6298 +
6299 +static const struct vm_operations_struct pax_vm_ops = {
6300 + .close = pax_emuplt_close,
6301 + .fault = pax_emuplt_fault
6302 +};
6303 +
6304 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6305 +{
6306 + int ret;
6307 +
6308 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6309 + vma->vm_mm = current->mm;
6310 + vma->vm_start = addr;
6311 + vma->vm_end = addr + PAGE_SIZE;
6312 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6313 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6314 + vma->vm_ops = &pax_vm_ops;
6315 +
6316 + ret = insert_vm_struct(current->mm, vma);
6317 + if (ret)
6318 + return ret;
6319 +
6320 + ++current->mm->total_vm;
6321 + return 0;
6322 +}
6323 +#endif
6324 +
6325 +/*
6326 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6327 + *
6328 + * returns 1 when task should be killed
6329 + * 2 when patched PLT trampoline was detected
6330 + * 3 when unpatched PLT trampoline was detected
6331 + */
6332 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6333 +{
6334 +
6335 +#ifdef CONFIG_PAX_EMUPLT
6336 + int err;
6337 +
6338 + do { /* PaX: patched PLT emulation #1 */
6339 + unsigned int sethi1, sethi2, jmpl;
6340 +
6341 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6342 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6343 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6344 +
6345 + if (err)
6346 + break;
6347 +
6348 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6349 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6350 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6351 + {
6352 + unsigned long addr;
6353 +
6354 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6355 + addr = regs->u_regs[UREG_G1];
6356 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6357 +
6358 + if (test_thread_flag(TIF_32BIT))
6359 + addr &= 0xFFFFFFFFUL;
6360 +
6361 + regs->tpc = addr;
6362 + regs->tnpc = addr+4;
6363 + return 2;
6364 + }
6365 + } while (0);
6366 +
6367 + { /* PaX: patched PLT emulation #2 */
6368 + unsigned int ba;
6369 +
6370 + err = get_user(ba, (unsigned int *)regs->tpc);
6371 +
6372 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6373 + unsigned long addr;
6374 +
6375 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6376 +
6377 + if (test_thread_flag(TIF_32BIT))
6378 + addr &= 0xFFFFFFFFUL;
6379 +
6380 + regs->tpc = addr;
6381 + regs->tnpc = addr+4;
6382 + return 2;
6383 + }
6384 + }
6385 +
6386 + do { /* PaX: patched PLT emulation #3 */
6387 + unsigned int sethi, jmpl, nop;
6388 +
6389 + err = get_user(sethi, (unsigned int *)regs->tpc);
6390 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6391 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6392 +
6393 + if (err)
6394 + break;
6395 +
6396 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6397 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6398 + nop == 0x01000000U)
6399 + {
6400 + unsigned long addr;
6401 +
6402 + addr = (sethi & 0x003FFFFFU) << 10;
6403 + regs->u_regs[UREG_G1] = addr;
6404 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6405 +
6406 + if (test_thread_flag(TIF_32BIT))
6407 + addr &= 0xFFFFFFFFUL;
6408 +
6409 + regs->tpc = addr;
6410 + regs->tnpc = addr+4;
6411 + return 2;
6412 + }
6413 + } while (0);
6414 +
6415 + do { /* PaX: patched PLT emulation #4 */
6416 + unsigned int sethi, mov1, call, mov2;
6417 +
6418 + err = get_user(sethi, (unsigned int *)regs->tpc);
6419 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6420 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6421 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6422 +
6423 + if (err)
6424 + break;
6425 +
6426 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6427 + mov1 == 0x8210000FU &&
6428 + (call & 0xC0000000U) == 0x40000000U &&
6429 + mov2 == 0x9E100001U)
6430 + {
6431 + unsigned long addr;
6432 +
6433 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6434 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6435 +
6436 + if (test_thread_flag(TIF_32BIT))
6437 + addr &= 0xFFFFFFFFUL;
6438 +
6439 + regs->tpc = addr;
6440 + regs->tnpc = addr+4;
6441 + return 2;
6442 + }
6443 + } while (0);
6444 +
6445 + do { /* PaX: patched PLT emulation #5 */
6446 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6447 +
6448 + err = get_user(sethi, (unsigned int *)regs->tpc);
6449 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6450 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6451 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6452 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6453 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6454 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6455 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6456 +
6457 + if (err)
6458 + break;
6459 +
6460 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6461 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6462 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6463 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6464 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6465 + sllx == 0x83287020U &&
6466 + jmpl == 0x81C04005U &&
6467 + nop == 0x01000000U)
6468 + {
6469 + unsigned long addr;
6470 +
6471 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6472 + regs->u_regs[UREG_G1] <<= 32;
6473 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6474 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6475 + regs->tpc = addr;
6476 + regs->tnpc = addr+4;
6477 + return 2;
6478 + }
6479 + } while (0);
6480 +
6481 + do { /* PaX: patched PLT emulation #6 */
6482 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6483 +
6484 + err = get_user(sethi, (unsigned int *)regs->tpc);
6485 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6486 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6487 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6488 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6489 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6490 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6491 +
6492 + if (err)
6493 + break;
6494 +
6495 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6496 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6497 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6498 + sllx == 0x83287020U &&
6499 + (or & 0xFFFFE000U) == 0x8A116000U &&
6500 + jmpl == 0x81C04005U &&
6501 + nop == 0x01000000U)
6502 + {
6503 + unsigned long addr;
6504 +
6505 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6506 + regs->u_regs[UREG_G1] <<= 32;
6507 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6508 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6509 + regs->tpc = addr;
6510 + regs->tnpc = addr+4;
6511 + return 2;
6512 + }
6513 + } while (0);
6514 +
6515 + do { /* PaX: unpatched PLT emulation step 1 */
6516 + unsigned int sethi, ba, nop;
6517 +
6518 + err = get_user(sethi, (unsigned int *)regs->tpc);
6519 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6520 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6521 +
6522 + if (err)
6523 + break;
6524 +
6525 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6526 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6527 + nop == 0x01000000U)
6528 + {
6529 + unsigned long addr;
6530 + unsigned int save, call;
6531 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6532 +
6533 + if ((ba & 0xFFC00000U) == 0x30800000U)
6534 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6535 + else
6536 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6537 +
6538 + if (test_thread_flag(TIF_32BIT))
6539 + addr &= 0xFFFFFFFFUL;
6540 +
6541 + err = get_user(save, (unsigned int *)addr);
6542 + err |= get_user(call, (unsigned int *)(addr+4));
6543 + err |= get_user(nop, (unsigned int *)(addr+8));
6544 + if (err)
6545 + break;
6546 +
6547 +#ifdef CONFIG_PAX_DLRESOLVE
6548 + if (save == 0x9DE3BFA8U &&
6549 + (call & 0xC0000000U) == 0x40000000U &&
6550 + nop == 0x01000000U)
6551 + {
6552 + struct vm_area_struct *vma;
6553 + unsigned long call_dl_resolve;
6554 +
6555 + down_read(&current->mm->mmap_sem);
6556 + call_dl_resolve = current->mm->call_dl_resolve;
6557 + up_read(&current->mm->mmap_sem);
6558 + if (likely(call_dl_resolve))
6559 + goto emulate;
6560 +
6561 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6562 +
6563 + down_write(&current->mm->mmap_sem);
6564 + if (current->mm->call_dl_resolve) {
6565 + call_dl_resolve = current->mm->call_dl_resolve;
6566 + up_write(&current->mm->mmap_sem);
6567 + if (vma)
6568 + kmem_cache_free(vm_area_cachep, vma);
6569 + goto emulate;
6570 + }
6571 +
6572 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6573 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6574 + up_write(&current->mm->mmap_sem);
6575 + if (vma)
6576 + kmem_cache_free(vm_area_cachep, vma);
6577 + return 1;
6578 + }
6579 +
6580 + if (pax_insert_vma(vma, call_dl_resolve)) {
6581 + up_write(&current->mm->mmap_sem);
6582 + kmem_cache_free(vm_area_cachep, vma);
6583 + return 1;
6584 + }
6585 +
6586 + current->mm->call_dl_resolve = call_dl_resolve;
6587 + up_write(&current->mm->mmap_sem);
6588 +
6589 +emulate:
6590 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6591 + regs->tpc = call_dl_resolve;
6592 + regs->tnpc = addr+4;
6593 + return 3;
6594 + }
6595 +#endif
6596 +
6597 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6598 + if ((save & 0xFFC00000U) == 0x05000000U &&
6599 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6600 + nop == 0x01000000U)
6601 + {
6602 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6603 + regs->u_regs[UREG_G2] = addr + 4;
6604 + addr = (save & 0x003FFFFFU) << 10;
6605 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6606 +
6607 + if (test_thread_flag(TIF_32BIT))
6608 + addr &= 0xFFFFFFFFUL;
6609 +
6610 + regs->tpc = addr;
6611 + regs->tnpc = addr+4;
6612 + return 3;
6613 + }
6614 +
6615 + /* PaX: 64-bit PLT stub */
6616 + err = get_user(sethi1, (unsigned int *)addr);
6617 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6618 + err |= get_user(or1, (unsigned int *)(addr+8));
6619 + err |= get_user(or2, (unsigned int *)(addr+12));
6620 + err |= get_user(sllx, (unsigned int *)(addr+16));
6621 + err |= get_user(add, (unsigned int *)(addr+20));
6622 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6623 + err |= get_user(nop, (unsigned int *)(addr+28));
6624 + if (err)
6625 + break;
6626 +
6627 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6628 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6629 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6630 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6631 + sllx == 0x89293020U &&
6632 + add == 0x8A010005U &&
6633 + jmpl == 0x89C14000U &&
6634 + nop == 0x01000000U)
6635 + {
6636 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6637 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6638 + regs->u_regs[UREG_G4] <<= 32;
6639 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6640 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6641 + regs->u_regs[UREG_G4] = addr + 24;
6642 + addr = regs->u_regs[UREG_G5];
6643 + regs->tpc = addr;
6644 + regs->tnpc = addr+4;
6645 + return 3;
6646 + }
6647 + }
6648 + } while (0);
6649 +
6650 +#ifdef CONFIG_PAX_DLRESOLVE
6651 + do { /* PaX: unpatched PLT emulation step 2 */
6652 + unsigned int save, call, nop;
6653 +
6654 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6655 + err |= get_user(call, (unsigned int *)regs->tpc);
6656 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6657 + if (err)
6658 + break;
6659 +
6660 + if (save == 0x9DE3BFA8U &&
6661 + (call & 0xC0000000U) == 0x40000000U &&
6662 + nop == 0x01000000U)
6663 + {
6664 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6665 +
6666 + if (test_thread_flag(TIF_32BIT))
6667 + dl_resolve &= 0xFFFFFFFFUL;
6668 +
6669 + regs->u_regs[UREG_RETPC] = regs->tpc;
6670 + regs->tpc = dl_resolve;
6671 + regs->tnpc = dl_resolve+4;
6672 + return 3;
6673 + }
6674 + } while (0);
6675 +#endif
6676 +
6677 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6678 + unsigned int sethi, ba, nop;
6679 +
6680 + err = get_user(sethi, (unsigned int *)regs->tpc);
6681 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6682 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6683 +
6684 + if (err)
6685 + break;
6686 +
6687 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6688 + (ba & 0xFFF00000U) == 0x30600000U &&
6689 + nop == 0x01000000U)
6690 + {
6691 + unsigned long addr;
6692 +
6693 + addr = (sethi & 0x003FFFFFU) << 10;
6694 + regs->u_regs[UREG_G1] = addr;
6695 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6696 +
6697 + if (test_thread_flag(TIF_32BIT))
6698 + addr &= 0xFFFFFFFFUL;
6699 +
6700 + regs->tpc = addr;
6701 + regs->tnpc = addr+4;
6702 + return 2;
6703 + }
6704 + } while (0);
6705 +
6706 +#endif
6707 +
6708 + return 1;
6709 +}
6710 +
6711 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6712 +{
6713 + unsigned long i;
6714 +
6715 + printk(KERN_ERR "PAX: bytes at PC: ");
6716 + for (i = 0; i < 8; i++) {
6717 + unsigned int c;
6718 + if (get_user(c, (unsigned int *)pc+i))
6719 + printk(KERN_CONT "???????? ");
6720 + else
6721 + printk(KERN_CONT "%08x ", c);
6722 + }
6723 + printk("\n");
6724 +}
6725 +#endif
6726 +
6727 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6728 {
6729 struct mm_struct *mm = current->mm;
6730 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6731 if (!vma)
6732 goto bad_area;
6733
6734 +#ifdef CONFIG_PAX_PAGEEXEC
6735 + /* PaX: detect ITLB misses on non-exec pages */
6736 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6737 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6738 + {
6739 + if (address != regs->tpc)
6740 + goto good_area;
6741 +
6742 + up_read(&mm->mmap_sem);
6743 + switch (pax_handle_fetch_fault(regs)) {
6744 +
6745 +#ifdef CONFIG_PAX_EMUPLT
6746 + case 2:
6747 + case 3:
6748 + return;
6749 +#endif
6750 +
6751 + }
6752 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6753 + do_group_exit(SIGKILL);
6754 + }
6755 +#endif
6756 +
6757 /* Pure DTLB misses do not tell us whether the fault causing
6758 * load/store/atomic was a write or not, it only says that there
6759 * was no match. So in such a case we (carefully) read the
6760 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6761 index 07e1453..0a7d9e9 100644
6762 --- a/arch/sparc/mm/hugetlbpage.c
6763 +++ b/arch/sparc/mm/hugetlbpage.c
6764 @@ -67,7 +67,7 @@ full_search:
6765 }
6766 return -ENOMEM;
6767 }
6768 - if (likely(!vma || addr + len <= vma->vm_start)) {
6769 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6770 /*
6771 * Remember the place where we stopped the search:
6772 */
6773 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6774 /* make sure it can fit in the remaining address space */
6775 if (likely(addr > len)) {
6776 vma = find_vma(mm, addr-len);
6777 - if (!vma || addr <= vma->vm_start) {
6778 + if (check_heap_stack_gap(vma, addr - len, len)) {
6779 /* remember the address as a hint for next time */
6780 return (mm->free_area_cache = addr-len);
6781 }
6782 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6783 if (unlikely(mm->mmap_base < len))
6784 goto bottomup;
6785
6786 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6787 + addr = mm->mmap_base - len;
6788
6789 do {
6790 + addr &= HPAGE_MASK;
6791 /*
6792 * Lookup failure means no vma is above this address,
6793 * else if new region fits below vma->vm_start,
6794 * return with success:
6795 */
6796 vma = find_vma(mm, addr);
6797 - if (likely(!vma || addr+len <= vma->vm_start)) {
6798 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6799 /* remember the address as a hint for next time */
6800 return (mm->free_area_cache = addr);
6801 }
6802 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6803 mm->cached_hole_size = vma->vm_start - addr;
6804
6805 /* try just below the current vma->vm_start */
6806 - addr = (vma->vm_start-len) & HPAGE_MASK;
6807 - } while (likely(len < vma->vm_start));
6808 + addr = skip_heap_stack_gap(vma, len);
6809 + } while (!IS_ERR_VALUE(addr));
6810
6811 bottomup:
6812 /*
6813 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6814 if (addr) {
6815 addr = ALIGN(addr, HPAGE_SIZE);
6816 vma = find_vma(mm, addr);
6817 - if (task_size - len >= addr &&
6818 - (!vma || addr + len <= vma->vm_start))
6819 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6820 return addr;
6821 }
6822 if (mm->get_unmapped_area == arch_get_unmapped_area)
6823 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6824 index 7b00de6..78239f4 100644
6825 --- a/arch/sparc/mm/init_32.c
6826 +++ b/arch/sparc/mm/init_32.c
6827 @@ -316,6 +316,9 @@ extern void device_scan(void);
6828 pgprot_t PAGE_SHARED __read_mostly;
6829 EXPORT_SYMBOL(PAGE_SHARED);
6830
6831 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6832 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6833 +
6834 void __init paging_init(void)
6835 {
6836 switch(sparc_cpu_model) {
6837 @@ -344,17 +347,17 @@ void __init paging_init(void)
6838
6839 /* Initialize the protection map with non-constant, MMU dependent values. */
6840 protection_map[0] = PAGE_NONE;
6841 - protection_map[1] = PAGE_READONLY;
6842 - protection_map[2] = PAGE_COPY;
6843 - protection_map[3] = PAGE_COPY;
6844 + protection_map[1] = PAGE_READONLY_NOEXEC;
6845 + protection_map[2] = PAGE_COPY_NOEXEC;
6846 + protection_map[3] = PAGE_COPY_NOEXEC;
6847 protection_map[4] = PAGE_READONLY;
6848 protection_map[5] = PAGE_READONLY;
6849 protection_map[6] = PAGE_COPY;
6850 protection_map[7] = PAGE_COPY;
6851 protection_map[8] = PAGE_NONE;
6852 - protection_map[9] = PAGE_READONLY;
6853 - protection_map[10] = PAGE_SHARED;
6854 - protection_map[11] = PAGE_SHARED;
6855 + protection_map[9] = PAGE_READONLY_NOEXEC;
6856 + protection_map[10] = PAGE_SHARED_NOEXEC;
6857 + protection_map[11] = PAGE_SHARED_NOEXEC;
6858 protection_map[12] = PAGE_READONLY;
6859 protection_map[13] = PAGE_READONLY;
6860 protection_map[14] = PAGE_SHARED;
6861 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6862 index cbef74e..c38fead 100644
6863 --- a/arch/sparc/mm/srmmu.c
6864 +++ b/arch/sparc/mm/srmmu.c
6865 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6866 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6867 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6868 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6869 +
6870 +#ifdef CONFIG_PAX_PAGEEXEC
6871 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6872 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6873 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6874 +#endif
6875 +
6876 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6877 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6878
6879 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
6880 index 27fe667..36d474c 100644
6881 --- a/arch/tile/include/asm/atomic_64.h
6882 +++ b/arch/tile/include/asm/atomic_64.h
6883 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6884
6885 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6886
6887 +#define atomic64_read_unchecked(v) atomic64_read(v)
6888 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6889 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6890 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6891 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6892 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
6893 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6894 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
6895 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6896 +
6897 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
6898 #define smp_mb__before_atomic_dec() smp_mb()
6899 #define smp_mb__after_atomic_dec() smp_mb()
6900 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
6901 index 392e533..536b092 100644
6902 --- a/arch/tile/include/asm/cache.h
6903 +++ b/arch/tile/include/asm/cache.h
6904 @@ -15,11 +15,12 @@
6905 #ifndef _ASM_TILE_CACHE_H
6906 #define _ASM_TILE_CACHE_H
6907
6908 +#include <linux/const.h>
6909 #include <arch/chip.h>
6910
6911 /* bytes per L1 data cache line */
6912 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
6913 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6914 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6915
6916 /* bytes per L2 cache line */
6917 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
6918 diff --git a/arch/um/Makefile b/arch/um/Makefile
6919 index 7730af6..cce5b19 100644
6920 --- a/arch/um/Makefile
6921 +++ b/arch/um/Makefile
6922 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6923 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6924 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
6925
6926 +ifdef CONSTIFY_PLUGIN
6927 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6928 +endif
6929 +
6930 #This will adjust *FLAGS accordingly to the platform.
6931 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
6932
6933 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
6934 index 19e1bdd..3665b77 100644
6935 --- a/arch/um/include/asm/cache.h
6936 +++ b/arch/um/include/asm/cache.h
6937 @@ -1,6 +1,7 @@
6938 #ifndef __UM_CACHE_H
6939 #define __UM_CACHE_H
6940
6941 +#include <linux/const.h>
6942
6943 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
6944 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6945 @@ -12,6 +13,6 @@
6946 # define L1_CACHE_SHIFT 5
6947 #endif
6948
6949 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6950 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6951
6952 #endif
6953 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6954 index 6c03acd..a5e0215 100644
6955 --- a/arch/um/include/asm/kmap_types.h
6956 +++ b/arch/um/include/asm/kmap_types.h
6957 @@ -23,6 +23,7 @@ enum km_type {
6958 KM_IRQ1,
6959 KM_SOFTIRQ0,
6960 KM_SOFTIRQ1,
6961 + KM_CLEARPAGE,
6962 KM_TYPE_NR
6963 };
6964
6965 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6966 index 7cfc3ce..cbd1a58 100644
6967 --- a/arch/um/include/asm/page.h
6968 +++ b/arch/um/include/asm/page.h
6969 @@ -14,6 +14,9 @@
6970 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6971 #define PAGE_MASK (~(PAGE_SIZE-1))
6972
6973 +#define ktla_ktva(addr) (addr)
6974 +#define ktva_ktla(addr) (addr)
6975 +
6976 #ifndef __ASSEMBLY__
6977
6978 struct page;
6979 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6980 index c533835..84db18e 100644
6981 --- a/arch/um/kernel/process.c
6982 +++ b/arch/um/kernel/process.c
6983 @@ -406,22 +406,6 @@ int singlestepping(void * t)
6984 return 2;
6985 }
6986
6987 -/*
6988 - * Only x86 and x86_64 have an arch_align_stack().
6989 - * All other arches have "#define arch_align_stack(x) (x)"
6990 - * in their asm/system.h
6991 - * As this is included in UML from asm-um/system-generic.h,
6992 - * we can use it to behave as the subarch does.
6993 - */
6994 -#ifndef arch_align_stack
6995 -unsigned long arch_align_stack(unsigned long sp)
6996 -{
6997 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6998 - sp -= get_random_int() % 8192;
6999 - return sp & ~0xf;
7000 -}
7001 -#endif
7002 -
7003 unsigned long get_wchan(struct task_struct *p)
7004 {
7005 unsigned long stack_page, sp, ip;
7006 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7007 index ad8f795..2c7eec6 100644
7008 --- a/arch/unicore32/include/asm/cache.h
7009 +++ b/arch/unicore32/include/asm/cache.h
7010 @@ -12,8 +12,10 @@
7011 #ifndef __UNICORE_CACHE_H__
7012 #define __UNICORE_CACHE_H__
7013
7014 -#define L1_CACHE_SHIFT (5)
7015 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7016 +#include <linux/const.h>
7017 +
7018 +#define L1_CACHE_SHIFT 5
7019 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7020
7021 /*
7022 * Memory returned by kmalloc() may be used for DMA, so we must make
7023 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7024 index efb4294..61bc18c 100644
7025 --- a/arch/x86/Kconfig
7026 +++ b/arch/x86/Kconfig
7027 @@ -235,7 +235,7 @@ config X86_HT
7028
7029 config X86_32_LAZY_GS
7030 def_bool y
7031 - depends on X86_32 && !CC_STACKPROTECTOR
7032 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7033
7034 config ARCH_HWEIGHT_CFLAGS
7035 string
7036 @@ -1022,7 +1022,7 @@ choice
7037
7038 config NOHIGHMEM
7039 bool "off"
7040 - depends on !X86_NUMAQ
7041 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7042 ---help---
7043 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7044 However, the address space of 32-bit x86 processors is only 4
7045 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
7046
7047 config HIGHMEM4G
7048 bool "4GB"
7049 - depends on !X86_NUMAQ
7050 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7051 ---help---
7052 Select this if you have a 32-bit processor and between 1 and 4
7053 gigabytes of physical RAM.
7054 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
7055 hex
7056 default 0xB0000000 if VMSPLIT_3G_OPT
7057 default 0x80000000 if VMSPLIT_2G
7058 - default 0x78000000 if VMSPLIT_2G_OPT
7059 + default 0x70000000 if VMSPLIT_2G_OPT
7060 default 0x40000000 if VMSPLIT_1G
7061 default 0xC0000000
7062 depends on X86_32
7063 @@ -1496,6 +1496,7 @@ config SECCOMP
7064
7065 config CC_STACKPROTECTOR
7066 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7067 + depends on X86_64 || !PAX_MEMORY_UDEREF
7068 ---help---
7069 This option turns on the -fstack-protector GCC feature. This
7070 feature puts, at the beginning of functions, a canary value on
7071 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
7072 config PHYSICAL_START
7073 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7074 default "0x1000000"
7075 + range 0x400000 0x40000000
7076 ---help---
7077 This gives the physical address where the kernel is loaded.
7078
7079 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
7080 config PHYSICAL_ALIGN
7081 hex "Alignment value to which kernel should be aligned" if X86_32
7082 default "0x1000000"
7083 + range 0x400000 0x1000000 if PAX_KERNEXEC
7084 range 0x2000 0x1000000
7085 ---help---
7086 This value puts the alignment restrictions on physical address
7087 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
7088 Say N if you want to disable CPU hotplug.
7089
7090 config COMPAT_VDSO
7091 - def_bool y
7092 + def_bool n
7093 prompt "Compat VDSO support"
7094 depends on X86_32 || IA32_EMULATION
7095 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7096 ---help---
7097 Map the 32-bit VDSO to the predictable old-style address too.
7098
7099 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7100 index e3ca7e0..b30b28a 100644
7101 --- a/arch/x86/Kconfig.cpu
7102 +++ b/arch/x86/Kconfig.cpu
7103 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
7104
7105 config X86_F00F_BUG
7106 def_bool y
7107 - depends on M586MMX || M586TSC || M586 || M486 || M386
7108 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7109
7110 config X86_INVD_BUG
7111 def_bool y
7112 @@ -365,7 +365,7 @@ config X86_POPAD_OK
7113
7114 config X86_ALIGNMENT_16
7115 def_bool y
7116 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7117 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7118
7119 config X86_INTEL_USERCOPY
7120 def_bool y
7121 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
7122 # generates cmov.
7123 config X86_CMOV
7124 def_bool y
7125 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7126 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7127
7128 config X86_MINIMUM_CPU_FAMILY
7129 int
7130 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7131 index bf56e17..05f9891 100644
7132 --- a/arch/x86/Kconfig.debug
7133 +++ b/arch/x86/Kconfig.debug
7134 @@ -81,7 +81,7 @@ config X86_PTDUMP
7135 config DEBUG_RODATA
7136 bool "Write protect kernel read-only data structures"
7137 default y
7138 - depends on DEBUG_KERNEL
7139 + depends on DEBUG_KERNEL && BROKEN
7140 ---help---
7141 Mark the kernel read-only data as write-protected in the pagetables,
7142 in order to catch accidental (and incorrect) writes to such const
7143 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
7144
7145 config DEBUG_SET_MODULE_RONX
7146 bool "Set loadable kernel module data as NX and text as RO"
7147 - depends on MODULES
7148 + depends on MODULES && BROKEN
7149 ---help---
7150 This option helps catch unintended modifications to loadable
7151 kernel module's text and read-only data. It also prevents execution
7152 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7153 index b02e509..2631e48 100644
7154 --- a/arch/x86/Makefile
7155 +++ b/arch/x86/Makefile
7156 @@ -46,6 +46,7 @@ else
7157 UTS_MACHINE := x86_64
7158 CHECKFLAGS += -D__x86_64__ -m64
7159
7160 + biarch := $(call cc-option,-m64)
7161 KBUILD_AFLAGS += -m64
7162 KBUILD_CFLAGS += -m64
7163
7164 @@ -195,3 +196,12 @@ define archhelp
7165 echo ' FDARGS="..." arguments for the booted kernel'
7166 echo ' FDINITRD=file initrd for the booted kernel'
7167 endef
7168 +
7169 +define OLD_LD
7170 +
7171 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7172 +*** Please upgrade your binutils to 2.18 or newer
7173 +endef
7174 +
7175 +archprepare:
7176 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7177 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7178 index 95365a8..52f857b 100644
7179 --- a/arch/x86/boot/Makefile
7180 +++ b/arch/x86/boot/Makefile
7181 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7182 $(call cc-option, -fno-stack-protector) \
7183 $(call cc-option, -mpreferred-stack-boundary=2)
7184 KBUILD_CFLAGS += $(call cc-option, -m32)
7185 +ifdef CONSTIFY_PLUGIN
7186 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7187 +endif
7188 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7189 GCOV_PROFILE := n
7190
7191 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7192 index 878e4b9..20537ab 100644
7193 --- a/arch/x86/boot/bitops.h
7194 +++ b/arch/x86/boot/bitops.h
7195 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7196 u8 v;
7197 const u32 *p = (const u32 *)addr;
7198
7199 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7200 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7201 return v;
7202 }
7203
7204 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7205
7206 static inline void set_bit(int nr, void *addr)
7207 {
7208 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7209 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7210 }
7211
7212 #endif /* BOOT_BITOPS_H */
7213 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7214 index c7093bd..d4247ffe0 100644
7215 --- a/arch/x86/boot/boot.h
7216 +++ b/arch/x86/boot/boot.h
7217 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7218 static inline u16 ds(void)
7219 {
7220 u16 seg;
7221 - asm("movw %%ds,%0" : "=rm" (seg));
7222 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7223 return seg;
7224 }
7225
7226 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7227 static inline int memcmp(const void *s1, const void *s2, size_t len)
7228 {
7229 u8 diff;
7230 - asm("repe; cmpsb; setnz %0"
7231 + asm volatile("repe; cmpsb; setnz %0"
7232 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7233 return diff;
7234 }
7235 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7236 index 09664ef..edc5d03 100644
7237 --- a/arch/x86/boot/compressed/Makefile
7238 +++ b/arch/x86/boot/compressed/Makefile
7239 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7240 KBUILD_CFLAGS += $(cflags-y)
7241 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7242 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7243 +ifdef CONSTIFY_PLUGIN
7244 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7245 +endif
7246
7247 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7248 GCOV_PROFILE := n
7249 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7250 index 67a655a..b924059 100644
7251 --- a/arch/x86/boot/compressed/head_32.S
7252 +++ b/arch/x86/boot/compressed/head_32.S
7253 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7254 notl %eax
7255 andl %eax, %ebx
7256 #else
7257 - movl $LOAD_PHYSICAL_ADDR, %ebx
7258 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7259 #endif
7260
7261 /* Target address to relocate to for decompression */
7262 @@ -162,7 +162,7 @@ relocated:
7263 * and where it was actually loaded.
7264 */
7265 movl %ebp, %ebx
7266 - subl $LOAD_PHYSICAL_ADDR, %ebx
7267 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7268 jz 2f /* Nothing to be done if loaded at compiled addr. */
7269 /*
7270 * Process relocations.
7271 @@ -170,8 +170,7 @@ relocated:
7272
7273 1: subl $4, %edi
7274 movl (%edi), %ecx
7275 - testl %ecx, %ecx
7276 - jz 2f
7277 + jecxz 2f
7278 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7279 jmp 1b
7280 2:
7281 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7282 index 35af09d..99c9676 100644
7283 --- a/arch/x86/boot/compressed/head_64.S
7284 +++ b/arch/x86/boot/compressed/head_64.S
7285 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7286 notl %eax
7287 andl %eax, %ebx
7288 #else
7289 - movl $LOAD_PHYSICAL_ADDR, %ebx
7290 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7291 #endif
7292
7293 /* Target address to relocate to for decompression */
7294 @@ -233,7 +233,7 @@ ENTRY(startup_64)
7295 notq %rax
7296 andq %rax, %rbp
7297 #else
7298 - movq $LOAD_PHYSICAL_ADDR, %rbp
7299 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7300 #endif
7301
7302 /* Target address to relocate to for decompression */
7303 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7304 index 3a19d04..7c1d55a 100644
7305 --- a/arch/x86/boot/compressed/misc.c
7306 +++ b/arch/x86/boot/compressed/misc.c
7307 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7308 case PT_LOAD:
7309 #ifdef CONFIG_RELOCATABLE
7310 dest = output;
7311 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7312 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7313 #else
7314 dest = (void *)(phdr->p_paddr);
7315 #endif
7316 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7317 error("Destination address too large");
7318 #endif
7319 #ifndef CONFIG_RELOCATABLE
7320 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7321 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7322 error("Wrong destination address");
7323 #endif
7324
7325 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7326 index 89bbf4e..869908e 100644
7327 --- a/arch/x86/boot/compressed/relocs.c
7328 +++ b/arch/x86/boot/compressed/relocs.c
7329 @@ -13,8 +13,11 @@
7330
7331 static void die(char *fmt, ...);
7332
7333 +#include "../../../../include/generated/autoconf.h"
7334 +
7335 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7336 static Elf32_Ehdr ehdr;
7337 +static Elf32_Phdr *phdr;
7338 static unsigned long reloc_count, reloc_idx;
7339 static unsigned long *relocs;
7340
7341 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7342 }
7343 }
7344
7345 +static void read_phdrs(FILE *fp)
7346 +{
7347 + unsigned int i;
7348 +
7349 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7350 + if (!phdr) {
7351 + die("Unable to allocate %d program headers\n",
7352 + ehdr.e_phnum);
7353 + }
7354 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7355 + die("Seek to %d failed: %s\n",
7356 + ehdr.e_phoff, strerror(errno));
7357 + }
7358 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7359 + die("Cannot read ELF program headers: %s\n",
7360 + strerror(errno));
7361 + }
7362 + for(i = 0; i < ehdr.e_phnum; i++) {
7363 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7364 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7365 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7366 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7367 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7368 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7369 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7370 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7371 + }
7372 +
7373 +}
7374 +
7375 static void read_shdrs(FILE *fp)
7376 {
7377 - int i;
7378 + unsigned int i;
7379 Elf32_Shdr shdr;
7380
7381 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7382 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7383
7384 static void read_strtabs(FILE *fp)
7385 {
7386 - int i;
7387 + unsigned int i;
7388 for (i = 0; i < ehdr.e_shnum; i++) {
7389 struct section *sec = &secs[i];
7390 if (sec->shdr.sh_type != SHT_STRTAB) {
7391 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7392
7393 static void read_symtabs(FILE *fp)
7394 {
7395 - int i,j;
7396 + unsigned int i,j;
7397 for (i = 0; i < ehdr.e_shnum; i++) {
7398 struct section *sec = &secs[i];
7399 if (sec->shdr.sh_type != SHT_SYMTAB) {
7400 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7401
7402 static void read_relocs(FILE *fp)
7403 {
7404 - int i,j;
7405 + unsigned int i,j;
7406 + uint32_t base;
7407 +
7408 for (i = 0; i < ehdr.e_shnum; i++) {
7409 struct section *sec = &secs[i];
7410 if (sec->shdr.sh_type != SHT_REL) {
7411 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7412 die("Cannot read symbol table: %s\n",
7413 strerror(errno));
7414 }
7415 + base = 0;
7416 + for (j = 0; j < ehdr.e_phnum; j++) {
7417 + if (phdr[j].p_type != PT_LOAD )
7418 + continue;
7419 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7420 + continue;
7421 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7422 + break;
7423 + }
7424 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7425 Elf32_Rel *rel = &sec->reltab[j];
7426 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7427 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7428 rel->r_info = elf32_to_cpu(rel->r_info);
7429 }
7430 }
7431 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
7432
7433 static void print_absolute_symbols(void)
7434 {
7435 - int i;
7436 + unsigned int i;
7437 printf("Absolute symbols\n");
7438 printf(" Num: Value Size Type Bind Visibility Name\n");
7439 for (i = 0; i < ehdr.e_shnum; i++) {
7440 struct section *sec = &secs[i];
7441 char *sym_strtab;
7442 Elf32_Sym *sh_symtab;
7443 - int j;
7444 + unsigned int j;
7445
7446 if (sec->shdr.sh_type != SHT_SYMTAB) {
7447 continue;
7448 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
7449
7450 static void print_absolute_relocs(void)
7451 {
7452 - int i, printed = 0;
7453 + unsigned int i, printed = 0;
7454
7455 for (i = 0; i < ehdr.e_shnum; i++) {
7456 struct section *sec = &secs[i];
7457 struct section *sec_applies, *sec_symtab;
7458 char *sym_strtab;
7459 Elf32_Sym *sh_symtab;
7460 - int j;
7461 + unsigned int j;
7462 if (sec->shdr.sh_type != SHT_REL) {
7463 continue;
7464 }
7465 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
7466
7467 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7468 {
7469 - int i;
7470 + unsigned int i;
7471 /* Walk through the relocations */
7472 for (i = 0; i < ehdr.e_shnum; i++) {
7473 char *sym_strtab;
7474 Elf32_Sym *sh_symtab;
7475 struct section *sec_applies, *sec_symtab;
7476 - int j;
7477 + unsigned int j;
7478 struct section *sec = &secs[i];
7479
7480 if (sec->shdr.sh_type != SHT_REL) {
7481 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7482 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7483 continue;
7484 }
7485 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7486 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7487 + continue;
7488 +
7489 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7490 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7491 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7492 + continue;
7493 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7494 + continue;
7495 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7496 + continue;
7497 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7498 + continue;
7499 +#endif
7500 +
7501 switch (r_type) {
7502 case R_386_NONE:
7503 case R_386_PC32:
7504 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
7505
7506 static void emit_relocs(int as_text)
7507 {
7508 - int i;
7509 + unsigned int i;
7510 /* Count how many relocations I have and allocate space for them. */
7511 reloc_count = 0;
7512 walk_relocs(count_reloc);
7513 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
7514 fname, strerror(errno));
7515 }
7516 read_ehdr(fp);
7517 + read_phdrs(fp);
7518 read_shdrs(fp);
7519 read_strtabs(fp);
7520 read_symtabs(fp);
7521 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7522 index 4d3ff03..e4972ff 100644
7523 --- a/arch/x86/boot/cpucheck.c
7524 +++ b/arch/x86/boot/cpucheck.c
7525 @@ -74,7 +74,7 @@ static int has_fpu(void)
7526 u16 fcw = -1, fsw = -1;
7527 u32 cr0;
7528
7529 - asm("movl %%cr0,%0" : "=r" (cr0));
7530 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7531 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7532 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7533 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7534 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7535 {
7536 u32 f0, f1;
7537
7538 - asm("pushfl ; "
7539 + asm volatile("pushfl ; "
7540 "pushfl ; "
7541 "popl %0 ; "
7542 "movl %0,%1 ; "
7543 @@ -115,7 +115,7 @@ static void get_flags(void)
7544 set_bit(X86_FEATURE_FPU, cpu.flags);
7545
7546 if (has_eflag(X86_EFLAGS_ID)) {
7547 - asm("cpuid"
7548 + asm volatile("cpuid"
7549 : "=a" (max_intel_level),
7550 "=b" (cpu_vendor[0]),
7551 "=d" (cpu_vendor[1]),
7552 @@ -124,7 +124,7 @@ static void get_flags(void)
7553
7554 if (max_intel_level >= 0x00000001 &&
7555 max_intel_level <= 0x0000ffff) {
7556 - asm("cpuid"
7557 + asm volatile("cpuid"
7558 : "=a" (tfms),
7559 "=c" (cpu.flags[4]),
7560 "=d" (cpu.flags[0])
7561 @@ -136,7 +136,7 @@ static void get_flags(void)
7562 cpu.model += ((tfms >> 16) & 0xf) << 4;
7563 }
7564
7565 - asm("cpuid"
7566 + asm volatile("cpuid"
7567 : "=a" (max_amd_level)
7568 : "a" (0x80000000)
7569 : "ebx", "ecx", "edx");
7570 @@ -144,7 +144,7 @@ static void get_flags(void)
7571 if (max_amd_level >= 0x80000001 &&
7572 max_amd_level <= 0x8000ffff) {
7573 u32 eax = 0x80000001;
7574 - asm("cpuid"
7575 + asm volatile("cpuid"
7576 : "+a" (eax),
7577 "=c" (cpu.flags[6]),
7578 "=d" (cpu.flags[1])
7579 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7580 u32 ecx = MSR_K7_HWCR;
7581 u32 eax, edx;
7582
7583 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7584 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7585 eax &= ~(1 << 15);
7586 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7587 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7588
7589 get_flags(); /* Make sure it really did something */
7590 err = check_flags();
7591 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7592 u32 ecx = MSR_VIA_FCR;
7593 u32 eax, edx;
7594
7595 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7596 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7597 eax |= (1<<1)|(1<<7);
7598 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7599 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7600
7601 set_bit(X86_FEATURE_CX8, cpu.flags);
7602 err = check_flags();
7603 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7604 u32 eax, edx;
7605 u32 level = 1;
7606
7607 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7608 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7609 - asm("cpuid"
7610 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7611 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7612 + asm volatile("cpuid"
7613 : "+a" (level), "=d" (cpu.flags[0])
7614 : : "ecx", "ebx");
7615 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7616 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7617
7618 err = check_flags();
7619 }
7620 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7621 index bdb4d45..0476680 100644
7622 --- a/arch/x86/boot/header.S
7623 +++ b/arch/x86/boot/header.S
7624 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7625 # single linked list of
7626 # struct setup_data
7627
7628 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7629 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7630
7631 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7632 #define VO_INIT_SIZE (VO__end - VO__text)
7633 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7634 index db75d07..8e6d0af 100644
7635 --- a/arch/x86/boot/memory.c
7636 +++ b/arch/x86/boot/memory.c
7637 @@ -19,7 +19,7 @@
7638
7639 static int detect_memory_e820(void)
7640 {
7641 - int count = 0;
7642 + unsigned int count = 0;
7643 struct biosregs ireg, oreg;
7644 struct e820entry *desc = boot_params.e820_map;
7645 static struct e820entry buf; /* static so it is zeroed */
7646 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7647 index 11e8c6e..fdbb1ed 100644
7648 --- a/arch/x86/boot/video-vesa.c
7649 +++ b/arch/x86/boot/video-vesa.c
7650 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7651
7652 boot_params.screen_info.vesapm_seg = oreg.es;
7653 boot_params.screen_info.vesapm_off = oreg.di;
7654 + boot_params.screen_info.vesapm_size = oreg.cx;
7655 }
7656
7657 /*
7658 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7659 index 43eda28..5ab5fdb 100644
7660 --- a/arch/x86/boot/video.c
7661 +++ b/arch/x86/boot/video.c
7662 @@ -96,7 +96,7 @@ static void store_mode_params(void)
7663 static unsigned int get_entry(void)
7664 {
7665 char entry_buf[4];
7666 - int i, len = 0;
7667 + unsigned int i, len = 0;
7668 int key;
7669 unsigned int v;
7670
7671 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7672 index 5b577d5..3c1fed4 100644
7673 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7674 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7675 @@ -8,6 +8,8 @@
7676 * including this sentence is retained in full.
7677 */
7678
7679 +#include <asm/alternative-asm.h>
7680 +
7681 .extern crypto_ft_tab
7682 .extern crypto_it_tab
7683 .extern crypto_fl_tab
7684 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7685 je B192; \
7686 leaq 32(r9),r9;
7687
7688 +#define ret pax_force_retaddr 0, 1; ret
7689 +
7690 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7691 movq r1,r2; \
7692 movq r3,r4; \
7693 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7694 index be6d9e3..21fbbca 100644
7695 --- a/arch/x86/crypto/aesni-intel_asm.S
7696 +++ b/arch/x86/crypto/aesni-intel_asm.S
7697 @@ -31,6 +31,7 @@
7698
7699 #include <linux/linkage.h>
7700 #include <asm/inst.h>
7701 +#include <asm/alternative-asm.h>
7702
7703 #ifdef __x86_64__
7704 .data
7705 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
7706 pop %r14
7707 pop %r13
7708 pop %r12
7709 + pax_force_retaddr 0, 1
7710 ret
7711 +ENDPROC(aesni_gcm_dec)
7712
7713
7714 /*****************************************************************************
7715 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
7716 pop %r14
7717 pop %r13
7718 pop %r12
7719 + pax_force_retaddr 0, 1
7720 ret
7721 +ENDPROC(aesni_gcm_enc)
7722
7723 #endif
7724
7725 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
7726 pxor %xmm1, %xmm0
7727 movaps %xmm0, (TKEYP)
7728 add $0x10, TKEYP
7729 + pax_force_retaddr_bts
7730 ret
7731
7732 .align 4
7733 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
7734 shufps $0b01001110, %xmm2, %xmm1
7735 movaps %xmm1, 0x10(TKEYP)
7736 add $0x20, TKEYP
7737 + pax_force_retaddr_bts
7738 ret
7739
7740 .align 4
7741 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
7742
7743 movaps %xmm0, (TKEYP)
7744 add $0x10, TKEYP
7745 + pax_force_retaddr_bts
7746 ret
7747
7748 .align 4
7749 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
7750 pxor %xmm1, %xmm2
7751 movaps %xmm2, (TKEYP)
7752 add $0x10, TKEYP
7753 + pax_force_retaddr_bts
7754 ret
7755
7756 /*
7757 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
7758 #ifndef __x86_64__
7759 popl KEYP
7760 #endif
7761 + pax_force_retaddr 0, 1
7762 ret
7763 +ENDPROC(aesni_set_key)
7764
7765 /*
7766 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7767 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
7768 popl KLEN
7769 popl KEYP
7770 #endif
7771 + pax_force_retaddr 0, 1
7772 ret
7773 +ENDPROC(aesni_enc)
7774
7775 /*
7776 * _aesni_enc1: internal ABI
7777 @@ -1959,6 +1972,7 @@ _aesni_enc1:
7778 AESENC KEY STATE
7779 movaps 0x70(TKEYP), KEY
7780 AESENCLAST KEY STATE
7781 + pax_force_retaddr_bts
7782 ret
7783
7784 /*
7785 @@ -2067,6 +2081,7 @@ _aesni_enc4:
7786 AESENCLAST KEY STATE2
7787 AESENCLAST KEY STATE3
7788 AESENCLAST KEY STATE4
7789 + pax_force_retaddr_bts
7790 ret
7791
7792 /*
7793 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
7794 popl KLEN
7795 popl KEYP
7796 #endif
7797 + pax_force_retaddr 0, 1
7798 ret
7799 +ENDPROC(aesni_dec)
7800
7801 /*
7802 * _aesni_dec1: internal ABI
7803 @@ -2146,6 +2163,7 @@ _aesni_dec1:
7804 AESDEC KEY STATE
7805 movaps 0x70(TKEYP), KEY
7806 AESDECLAST KEY STATE
7807 + pax_force_retaddr_bts
7808 ret
7809
7810 /*
7811 @@ -2254,6 +2272,7 @@ _aesni_dec4:
7812 AESDECLAST KEY STATE2
7813 AESDECLAST KEY STATE3
7814 AESDECLAST KEY STATE4
7815 + pax_force_retaddr_bts
7816 ret
7817
7818 /*
7819 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
7820 popl KEYP
7821 popl LEN
7822 #endif
7823 + pax_force_retaddr 0, 1
7824 ret
7825 +ENDPROC(aesni_ecb_enc)
7826
7827 /*
7828 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7829 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
7830 popl KEYP
7831 popl LEN
7832 #endif
7833 + pax_force_retaddr 0, 1
7834 ret
7835 +ENDPROC(aesni_ecb_dec)
7836
7837 /*
7838 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7839 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
7840 popl LEN
7841 popl IVP
7842 #endif
7843 + pax_force_retaddr 0, 1
7844 ret
7845 +ENDPROC(aesni_cbc_enc)
7846
7847 /*
7848 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7849 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
7850 popl LEN
7851 popl IVP
7852 #endif
7853 + pax_force_retaddr 0, 1
7854 ret
7855 +ENDPROC(aesni_cbc_dec)
7856
7857 #ifdef __x86_64__
7858 .align 16
7859 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
7860 mov $1, TCTR_LOW
7861 MOVQ_R64_XMM TCTR_LOW INC
7862 MOVQ_R64_XMM CTR TCTR_LOW
7863 + pax_force_retaddr_bts
7864 ret
7865
7866 /*
7867 @@ -2552,6 +2580,7 @@ _aesni_inc:
7868 .Linc_low:
7869 movaps CTR, IV
7870 PSHUFB_XMM BSWAP_MASK IV
7871 + pax_force_retaddr_bts
7872 ret
7873
7874 /*
7875 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
7876 .Lctr_enc_ret:
7877 movups IV, (IVP)
7878 .Lctr_enc_just_ret:
7879 + pax_force_retaddr 0, 1
7880 ret
7881 +ENDPROC(aesni_ctr_enc)
7882 #endif
7883 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7884 index 391d245..67f35c2 100644
7885 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
7886 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7887 @@ -20,6 +20,8 @@
7888 *
7889 */
7890
7891 +#include <asm/alternative-asm.h>
7892 +
7893 .file "blowfish-x86_64-asm.S"
7894 .text
7895
7896 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
7897 jnz __enc_xor;
7898
7899 write_block();
7900 + pax_force_retaddr 0, 1
7901 ret;
7902 __enc_xor:
7903 xor_block();
7904 + pax_force_retaddr 0, 1
7905 ret;
7906
7907 .align 8
7908 @@ -188,6 +192,7 @@ blowfish_dec_blk:
7909
7910 movq %r11, %rbp;
7911
7912 + pax_force_retaddr 0, 1
7913 ret;
7914
7915 /**********************************************************************
7916 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
7917
7918 popq %rbx;
7919 popq %rbp;
7920 + pax_force_retaddr 0, 1
7921 ret;
7922
7923 __enc_xor4:
7924 @@ -349,6 +355,7 @@ __enc_xor4:
7925
7926 popq %rbx;
7927 popq %rbp;
7928 + pax_force_retaddr 0, 1
7929 ret;
7930
7931 .align 8
7932 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
7933 popq %rbx;
7934 popq %rbp;
7935
7936 + pax_force_retaddr 0, 1
7937 ret;
7938
7939 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7940 index 6214a9b..1f4fc9a 100644
7941 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7942 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7943 @@ -1,3 +1,5 @@
7944 +#include <asm/alternative-asm.h>
7945 +
7946 # enter ECRYPT_encrypt_bytes
7947 .text
7948 .p2align 5
7949 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7950 add %r11,%rsp
7951 mov %rdi,%rax
7952 mov %rsi,%rdx
7953 + pax_force_retaddr 0, 1
7954 ret
7955 # bytesatleast65:
7956 ._bytesatleast65:
7957 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7958 add %r11,%rsp
7959 mov %rdi,%rax
7960 mov %rsi,%rdx
7961 + pax_force_retaddr
7962 ret
7963 # enter ECRYPT_ivsetup
7964 .text
7965 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7966 add %r11,%rsp
7967 mov %rdi,%rax
7968 mov %rsi,%rdx
7969 + pax_force_retaddr
7970 ret
7971 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
7972 index b2c2f57..8470cab 100644
7973 --- a/arch/x86/crypto/sha1_ssse3_asm.S
7974 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
7975 @@ -28,6 +28,8 @@
7976 * (at your option) any later version.
7977 */
7978
7979 +#include <asm/alternative-asm.h>
7980 +
7981 #define CTX %rdi // arg1
7982 #define BUF %rsi // arg2
7983 #define CNT %rdx // arg3
7984 @@ -104,6 +106,7 @@
7985 pop %r12
7986 pop %rbp
7987 pop %rbx
7988 + pax_force_retaddr 0, 1
7989 ret
7990
7991 .size \name, .-\name
7992 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
7993 index 5b012a2..36d5364 100644
7994 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
7995 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
7996 @@ -20,6 +20,8 @@
7997 *
7998 */
7999
8000 +#include <asm/alternative-asm.h>
8001 +
8002 .file "twofish-x86_64-asm-3way.S"
8003 .text
8004
8005 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8006 popq %r13;
8007 popq %r14;
8008 popq %r15;
8009 + pax_force_retaddr 0, 1
8010 ret;
8011
8012 __enc_xor3:
8013 @@ -271,6 +274,7 @@ __enc_xor3:
8014 popq %r13;
8015 popq %r14;
8016 popq %r15;
8017 + pax_force_retaddr 0, 1
8018 ret;
8019
8020 .global twofish_dec_blk_3way
8021 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8022 popq %r13;
8023 popq %r14;
8024 popq %r15;
8025 + pax_force_retaddr 0, 1
8026 ret;
8027
8028 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8029 index 7bcf3fc..f53832f 100644
8030 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8031 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8032 @@ -21,6 +21,7 @@
8033 .text
8034
8035 #include <asm/asm-offsets.h>
8036 +#include <asm/alternative-asm.h>
8037
8038 #define a_offset 0
8039 #define b_offset 4
8040 @@ -268,6 +269,7 @@ twofish_enc_blk:
8041
8042 popq R1
8043 movq $1,%rax
8044 + pax_force_retaddr 0, 1
8045 ret
8046
8047 twofish_dec_blk:
8048 @@ -319,4 +321,5 @@ twofish_dec_blk:
8049
8050 popq R1
8051 movq $1,%rax
8052 + pax_force_retaddr 0, 1
8053 ret
8054 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8055 index fd84387..887aa7e 100644
8056 --- a/arch/x86/ia32/ia32_aout.c
8057 +++ b/arch/x86/ia32/ia32_aout.c
8058 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8059 unsigned long dump_start, dump_size;
8060 struct user32 dump;
8061
8062 + memset(&dump, 0, sizeof(dump));
8063 +
8064 fs = get_fs();
8065 set_fs(KERNEL_DS);
8066 has_dumped = 1;
8067 @@ -315,6 +317,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8068 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
8069 current->mm->cached_hole_size = 0;
8070
8071 + retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8072 + if (retval < 0) {
8073 + /* Someone check-me: is this error path enough? */
8074 + send_sig(SIGKILL, current, 0);
8075 + return retval;
8076 + }
8077 +
8078 install_exec_creds(bprm);
8079 current->flags &= ~PF_FORKNOEXEC;
8080
8081 @@ -410,13 +419,6 @@ beyond_if:
8082
8083 set_brk(current->mm->start_brk, current->mm->brk);
8084
8085 - retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8086 - if (retval < 0) {
8087 - /* Someone check-me: is this error path enough? */
8088 - send_sig(SIGKILL, current, 0);
8089 - return retval;
8090 - }
8091 -
8092 current->mm->start_stack =
8093 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8094 /* start thread */
8095 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8096 index 6557769..ef6ae89 100644
8097 --- a/arch/x86/ia32/ia32_signal.c
8098 +++ b/arch/x86/ia32/ia32_signal.c
8099 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8100 }
8101 seg = get_fs();
8102 set_fs(KERNEL_DS);
8103 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8104 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8105 set_fs(seg);
8106 if (ret >= 0 && uoss_ptr) {
8107 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8108 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8109 */
8110 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8111 size_t frame_size,
8112 - void **fpstate)
8113 + void __user **fpstate)
8114 {
8115 unsigned long sp;
8116
8117 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8118
8119 if (used_math()) {
8120 sp = sp - sig_xstate_ia32_size;
8121 - *fpstate = (struct _fpstate_ia32 *) sp;
8122 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8123 if (save_i387_xstate_ia32(*fpstate) < 0)
8124 return (void __user *) -1L;
8125 }
8126 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8127 sp -= frame_size;
8128 /* Align the stack pointer according to the i386 ABI,
8129 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8130 - sp = ((sp + 4) & -16ul) - 4;
8131 + sp = ((sp - 12) & -16ul) - 4;
8132 return (void __user *) sp;
8133 }
8134
8135 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8136 * These are actually not used anymore, but left because some
8137 * gdb versions depend on them as a marker.
8138 */
8139 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8140 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8141 } put_user_catch(err);
8142
8143 if (err)
8144 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8145 0xb8,
8146 __NR_ia32_rt_sigreturn,
8147 0x80cd,
8148 - 0,
8149 + 0
8150 };
8151
8152 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8153 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8154
8155 if (ka->sa.sa_flags & SA_RESTORER)
8156 restorer = ka->sa.sa_restorer;
8157 + else if (current->mm->context.vdso)
8158 + /* Return stub is in 32bit vsyscall page */
8159 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8160 else
8161 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8162 - rt_sigreturn);
8163 + restorer = &frame->retcode;
8164 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8165
8166 /*
8167 * Not actually used anymore, but left because some gdb
8168 * versions need it.
8169 */
8170 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8171 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8172 } put_user_catch(err);
8173
8174 if (err)
8175 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8176 index a6253ec..4ad2120 100644
8177 --- a/arch/x86/ia32/ia32entry.S
8178 +++ b/arch/x86/ia32/ia32entry.S
8179 @@ -13,7 +13,9 @@
8180 #include <asm/thread_info.h>
8181 #include <asm/segment.h>
8182 #include <asm/irqflags.h>
8183 +#include <asm/pgtable.h>
8184 #include <linux/linkage.h>
8185 +#include <asm/alternative-asm.h>
8186
8187 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8188 #include <linux/elf-em.h>
8189 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
8190 ENDPROC(native_irq_enable_sysexit)
8191 #endif
8192
8193 + .macro pax_enter_kernel_user
8194 + pax_set_fptr_mask
8195 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8196 + call pax_enter_kernel_user
8197 +#endif
8198 + .endm
8199 +
8200 + .macro pax_exit_kernel_user
8201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8202 + call pax_exit_kernel_user
8203 +#endif
8204 +#ifdef CONFIG_PAX_RANDKSTACK
8205 + pushq %rax
8206 + pushq %r11
8207 + call pax_randomize_kstack
8208 + popq %r11
8209 + popq %rax
8210 +#endif
8211 + .endm
8212 +
8213 +.macro pax_erase_kstack
8214 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8215 + call pax_erase_kstack
8216 +#endif
8217 +.endm
8218 +
8219 /*
8220 * 32bit SYSENTER instruction entry.
8221 *
8222 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
8223 CFI_REGISTER rsp,rbp
8224 SWAPGS_UNSAFE_STACK
8225 movq PER_CPU_VAR(kernel_stack), %rsp
8226 - addq $(KERNEL_STACK_OFFSET),%rsp
8227 - /*
8228 - * No need to follow this irqs on/off section: the syscall
8229 - * disabled irqs, here we enable it straight after entry:
8230 - */
8231 - ENABLE_INTERRUPTS(CLBR_NONE)
8232 movl %ebp,%ebp /* zero extension */
8233 pushq_cfi $__USER32_DS
8234 /*CFI_REL_OFFSET ss,0*/
8235 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
8236 CFI_REL_OFFSET rsp,0
8237 pushfq_cfi
8238 /*CFI_REL_OFFSET rflags,0*/
8239 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8240 - CFI_REGISTER rip,r10
8241 + orl $X86_EFLAGS_IF,(%rsp)
8242 + GET_THREAD_INFO(%r11)
8243 + movl TI_sysenter_return(%r11), %r11d
8244 + CFI_REGISTER rip,r11
8245 pushq_cfi $__USER32_CS
8246 /*CFI_REL_OFFSET cs,0*/
8247 movl %eax, %eax
8248 - pushq_cfi %r10
8249 + pushq_cfi %r11
8250 CFI_REL_OFFSET rip,0
8251 pushq_cfi %rax
8252 cld
8253 SAVE_ARGS 0,1,0
8254 + pax_enter_kernel_user
8255 + /*
8256 + * No need to follow this irqs on/off section: the syscall
8257 + * disabled irqs, here we enable it straight after entry:
8258 + */
8259 + ENABLE_INTERRUPTS(CLBR_NONE)
8260 /* no need to do an access_ok check here because rbp has been
8261 32bit zero extended */
8262 +
8263 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8264 + mov $PAX_USER_SHADOW_BASE,%r11
8265 + add %r11,%rbp
8266 +#endif
8267 +
8268 1: movl (%rbp),%ebp
8269 .section __ex_table,"a"
8270 .quad 1b,ia32_badarg
8271 .previous
8272 - GET_THREAD_INFO(%r10)
8273 - orl $TS_COMPAT,TI_status(%r10)
8274 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8275 + GET_THREAD_INFO(%r11)
8276 + orl $TS_COMPAT,TI_status(%r11)
8277 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8278 CFI_REMEMBER_STATE
8279 jnz sysenter_tracesys
8280 cmpq $(IA32_NR_syscalls-1),%rax
8281 @@ -162,13 +198,15 @@ sysenter_do_call:
8282 sysenter_dispatch:
8283 call *ia32_sys_call_table(,%rax,8)
8284 movq %rax,RAX-ARGOFFSET(%rsp)
8285 - GET_THREAD_INFO(%r10)
8286 + GET_THREAD_INFO(%r11)
8287 DISABLE_INTERRUPTS(CLBR_NONE)
8288 TRACE_IRQS_OFF
8289 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8290 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8291 jnz sysexit_audit
8292 sysexit_from_sys_call:
8293 - andl $~TS_COMPAT,TI_status(%r10)
8294 + pax_exit_kernel_user
8295 + pax_erase_kstack
8296 + andl $~TS_COMPAT,TI_status(%r11)
8297 /* clear IF, that popfq doesn't enable interrupts early */
8298 andl $~0x200,EFLAGS-R11(%rsp)
8299 movl RIP-R11(%rsp),%edx /* User %eip */
8300 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
8301 movl %eax,%esi /* 2nd arg: syscall number */
8302 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8303 call audit_syscall_entry
8304 +
8305 + pax_erase_kstack
8306 +
8307 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8308 cmpq $(IA32_NR_syscalls-1),%rax
8309 ja ia32_badsys
8310 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
8311 .endm
8312
8313 .macro auditsys_exit exit
8314 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8315 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8316 jnz ia32_ret_from_sys_call
8317 TRACE_IRQS_ON
8318 sti
8319 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
8320 movzbl %al,%edi /* zero-extend that into %edi */
8321 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8322 call audit_syscall_exit
8323 - GET_THREAD_INFO(%r10)
8324 + GET_THREAD_INFO(%r11)
8325 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8326 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8327 cli
8328 TRACE_IRQS_OFF
8329 - testl %edi,TI_flags(%r10)
8330 + testl %edi,TI_flags(%r11)
8331 jz \exit
8332 CLEAR_RREGS -ARGOFFSET
8333 jmp int_with_check
8334 @@ -238,7 +279,7 @@ sysexit_audit:
8335
8336 sysenter_tracesys:
8337 #ifdef CONFIG_AUDITSYSCALL
8338 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8339 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8340 jz sysenter_auditsys
8341 #endif
8342 SAVE_REST
8343 @@ -246,6 +287,9 @@ sysenter_tracesys:
8344 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8345 movq %rsp,%rdi /* &pt_regs -> arg1 */
8346 call syscall_trace_enter
8347 +
8348 + pax_erase_kstack
8349 +
8350 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8351 RESTORE_REST
8352 cmpq $(IA32_NR_syscalls-1),%rax
8353 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8354 ENTRY(ia32_cstar_target)
8355 CFI_STARTPROC32 simple
8356 CFI_SIGNAL_FRAME
8357 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8358 + CFI_DEF_CFA rsp,0
8359 CFI_REGISTER rip,rcx
8360 /*CFI_REGISTER rflags,r11*/
8361 SWAPGS_UNSAFE_STACK
8362 movl %esp,%r8d
8363 CFI_REGISTER rsp,r8
8364 movq PER_CPU_VAR(kernel_stack),%rsp
8365 + SAVE_ARGS 8*6,0,0
8366 + pax_enter_kernel_user
8367 /*
8368 * No need to follow this irqs on/off section: the syscall
8369 * disabled irqs and here we enable it straight after entry:
8370 */
8371 ENABLE_INTERRUPTS(CLBR_NONE)
8372 - SAVE_ARGS 8,0,0
8373 movl %eax,%eax /* zero extension */
8374 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8375 movq %rcx,RIP-ARGOFFSET(%rsp)
8376 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
8377 /* no need to do an access_ok check here because r8 has been
8378 32bit zero extended */
8379 /* hardware stack frame is complete now */
8380 +
8381 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8382 + mov $PAX_USER_SHADOW_BASE,%r11
8383 + add %r11,%r8
8384 +#endif
8385 +
8386 1: movl (%r8),%r9d
8387 .section __ex_table,"a"
8388 .quad 1b,ia32_badarg
8389 .previous
8390 - GET_THREAD_INFO(%r10)
8391 - orl $TS_COMPAT,TI_status(%r10)
8392 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8393 + GET_THREAD_INFO(%r11)
8394 + orl $TS_COMPAT,TI_status(%r11)
8395 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8396 CFI_REMEMBER_STATE
8397 jnz cstar_tracesys
8398 cmpq $IA32_NR_syscalls-1,%rax
8399 @@ -321,13 +372,15 @@ cstar_do_call:
8400 cstar_dispatch:
8401 call *ia32_sys_call_table(,%rax,8)
8402 movq %rax,RAX-ARGOFFSET(%rsp)
8403 - GET_THREAD_INFO(%r10)
8404 + GET_THREAD_INFO(%r11)
8405 DISABLE_INTERRUPTS(CLBR_NONE)
8406 TRACE_IRQS_OFF
8407 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8408 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8409 jnz sysretl_audit
8410 sysretl_from_sys_call:
8411 - andl $~TS_COMPAT,TI_status(%r10)
8412 + pax_exit_kernel_user
8413 + pax_erase_kstack
8414 + andl $~TS_COMPAT,TI_status(%r11)
8415 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8416 movl RIP-ARGOFFSET(%rsp),%ecx
8417 CFI_REGISTER rip,rcx
8418 @@ -355,7 +408,7 @@ sysretl_audit:
8419
8420 cstar_tracesys:
8421 #ifdef CONFIG_AUDITSYSCALL
8422 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8423 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8424 jz cstar_auditsys
8425 #endif
8426 xchgl %r9d,%ebp
8427 @@ -364,6 +417,9 @@ cstar_tracesys:
8428 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8429 movq %rsp,%rdi /* &pt_regs -> arg1 */
8430 call syscall_trace_enter
8431 +
8432 + pax_erase_kstack
8433 +
8434 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8435 RESTORE_REST
8436 xchgl %ebp,%r9d
8437 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
8438 CFI_REL_OFFSET rip,RIP-RIP
8439 PARAVIRT_ADJUST_EXCEPTION_FRAME
8440 SWAPGS
8441 - /*
8442 - * No need to follow this irqs on/off section: the syscall
8443 - * disabled irqs and here we enable it straight after entry:
8444 - */
8445 - ENABLE_INTERRUPTS(CLBR_NONE)
8446 movl %eax,%eax
8447 pushq_cfi %rax
8448 cld
8449 /* note the registers are not zero extended to the sf.
8450 this could be a problem. */
8451 SAVE_ARGS 0,1,0
8452 - GET_THREAD_INFO(%r10)
8453 - orl $TS_COMPAT,TI_status(%r10)
8454 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8455 + pax_enter_kernel_user
8456 + /*
8457 + * No need to follow this irqs on/off section: the syscall
8458 + * disabled irqs and here we enable it straight after entry:
8459 + */
8460 + ENABLE_INTERRUPTS(CLBR_NONE)
8461 + GET_THREAD_INFO(%r11)
8462 + orl $TS_COMPAT,TI_status(%r11)
8463 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8464 jnz ia32_tracesys
8465 cmpq $(IA32_NR_syscalls-1),%rax
8466 ja ia32_badsys
8467 @@ -441,6 +498,9 @@ ia32_tracesys:
8468 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8469 movq %rsp,%rdi /* &pt_regs -> arg1 */
8470 call syscall_trace_enter
8471 +
8472 + pax_erase_kstack
8473 +
8474 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8475 RESTORE_REST
8476 cmpq $(IA32_NR_syscalls-1),%rax
8477 @@ -455,6 +515,7 @@ ia32_badsys:
8478
8479 quiet_ni_syscall:
8480 movq $-ENOSYS,%rax
8481 + pax_force_retaddr
8482 ret
8483 CFI_ENDPROC
8484
8485 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8486 index f6f5c53..b358b28 100644
8487 --- a/arch/x86/ia32/sys_ia32.c
8488 +++ b/arch/x86/ia32/sys_ia32.c
8489 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8490 */
8491 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8492 {
8493 - typeof(ubuf->st_uid) uid = 0;
8494 - typeof(ubuf->st_gid) gid = 0;
8495 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8496 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8497 SET_UID(uid, stat->uid);
8498 SET_GID(gid, stat->gid);
8499 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8500 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8501 }
8502 set_fs(KERNEL_DS);
8503 ret = sys_rt_sigprocmask(how,
8504 - set ? (sigset_t __user *)&s : NULL,
8505 - oset ? (sigset_t __user *)&s : NULL,
8506 + set ? (sigset_t __force_user *)&s : NULL,
8507 + oset ? (sigset_t __force_user *)&s : NULL,
8508 sigsetsize);
8509 set_fs(old_fs);
8510 if (ret)
8511 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8512 return alarm_setitimer(seconds);
8513 }
8514
8515 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8516 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8517 int options)
8518 {
8519 return compat_sys_wait4(pid, stat_addr, options, NULL);
8520 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8521 mm_segment_t old_fs = get_fs();
8522
8523 set_fs(KERNEL_DS);
8524 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8525 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8526 set_fs(old_fs);
8527 if (put_compat_timespec(&t, interval))
8528 return -EFAULT;
8529 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8530 mm_segment_t old_fs = get_fs();
8531
8532 set_fs(KERNEL_DS);
8533 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8534 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8535 set_fs(old_fs);
8536 if (!ret) {
8537 switch (_NSIG_WORDS) {
8538 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8539 if (copy_siginfo_from_user32(&info, uinfo))
8540 return -EFAULT;
8541 set_fs(KERNEL_DS);
8542 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8543 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8544 set_fs(old_fs);
8545 return ret;
8546 }
8547 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8548 return -EFAULT;
8549
8550 set_fs(KERNEL_DS);
8551 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8552 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8553 count);
8554 set_fs(old_fs);
8555
8556 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8557 index 091508b..7692c6f 100644
8558 --- a/arch/x86/include/asm/alternative-asm.h
8559 +++ b/arch/x86/include/asm/alternative-asm.h
8560 @@ -4,10 +4,10 @@
8561
8562 #ifdef CONFIG_SMP
8563 .macro LOCK_PREFIX
8564 -1: lock
8565 +672: lock
8566 .section .smp_locks,"a"
8567 .balign 4
8568 - .long 1b - .
8569 + .long 672b - .
8570 .previous
8571 .endm
8572 #else
8573 @@ -15,6 +15,45 @@
8574 .endm
8575 #endif
8576
8577 +#ifdef KERNEXEC_PLUGIN
8578 + .macro pax_force_retaddr_bts rip=0
8579 + btsq $63,\rip(%rsp)
8580 + .endm
8581 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8582 + .macro pax_force_retaddr rip=0, reload=0
8583 + btsq $63,\rip(%rsp)
8584 + .endm
8585 + .macro pax_force_fptr ptr
8586 + btsq $63,\ptr
8587 + .endm
8588 + .macro pax_set_fptr_mask
8589 + .endm
8590 +#endif
8591 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8592 + .macro pax_force_retaddr rip=0, reload=0
8593 + .if \reload
8594 + pax_set_fptr_mask
8595 + .endif
8596 + orq %r10,\rip(%rsp)
8597 + .endm
8598 + .macro pax_force_fptr ptr
8599 + orq %r10,\ptr
8600 + .endm
8601 + .macro pax_set_fptr_mask
8602 + movabs $0x8000000000000000,%r10
8603 + .endm
8604 +#endif
8605 +#else
8606 + .macro pax_force_retaddr rip=0, reload=0
8607 + .endm
8608 + .macro pax_force_fptr ptr
8609 + .endm
8610 + .macro pax_force_retaddr_bts rip=0
8611 + .endm
8612 + .macro pax_set_fptr_mask
8613 + .endm
8614 +#endif
8615 +
8616 .macro altinstruction_entry orig alt feature orig_len alt_len
8617 .long \orig - .
8618 .long \alt - .
8619 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8620 index 37ad100..7d47faa 100644
8621 --- a/arch/x86/include/asm/alternative.h
8622 +++ b/arch/x86/include/asm/alternative.h
8623 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8624 ".section .discard,\"aw\",@progbits\n" \
8625 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8626 ".previous\n" \
8627 - ".section .altinstr_replacement, \"ax\"\n" \
8628 + ".section .altinstr_replacement, \"a\"\n" \
8629 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8630 ".previous"
8631
8632 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8633 index 1a6c09a..fec2432 100644
8634 --- a/arch/x86/include/asm/apic.h
8635 +++ b/arch/x86/include/asm/apic.h
8636 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
8637
8638 #ifdef CONFIG_X86_LOCAL_APIC
8639
8640 -extern unsigned int apic_verbosity;
8641 +extern int apic_verbosity;
8642 extern int local_apic_timer_c2_ok;
8643
8644 extern int disable_apic;
8645 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8646 index 20370c6..a2eb9b0 100644
8647 --- a/arch/x86/include/asm/apm.h
8648 +++ b/arch/x86/include/asm/apm.h
8649 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8650 __asm__ __volatile__(APM_DO_ZERO_SEGS
8651 "pushl %%edi\n\t"
8652 "pushl %%ebp\n\t"
8653 - "lcall *%%cs:apm_bios_entry\n\t"
8654 + "lcall *%%ss:apm_bios_entry\n\t"
8655 "setc %%al\n\t"
8656 "popl %%ebp\n\t"
8657 "popl %%edi\n\t"
8658 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8659 __asm__ __volatile__(APM_DO_ZERO_SEGS
8660 "pushl %%edi\n\t"
8661 "pushl %%ebp\n\t"
8662 - "lcall *%%cs:apm_bios_entry\n\t"
8663 + "lcall *%%ss:apm_bios_entry\n\t"
8664 "setc %%bl\n\t"
8665 "popl %%ebp\n\t"
8666 "popl %%edi\n\t"
8667 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8668 index 58cb6d4..ca9010d 100644
8669 --- a/arch/x86/include/asm/atomic.h
8670 +++ b/arch/x86/include/asm/atomic.h
8671 @@ -22,7 +22,18 @@
8672 */
8673 static inline int atomic_read(const atomic_t *v)
8674 {
8675 - return (*(volatile int *)&(v)->counter);
8676 + return (*(volatile const int *)&(v)->counter);
8677 +}
8678 +
8679 +/**
8680 + * atomic_read_unchecked - read atomic variable
8681 + * @v: pointer of type atomic_unchecked_t
8682 + *
8683 + * Atomically reads the value of @v.
8684 + */
8685 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8686 +{
8687 + return (*(volatile const int *)&(v)->counter);
8688 }
8689
8690 /**
8691 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
8692 }
8693
8694 /**
8695 + * atomic_set_unchecked - set atomic variable
8696 + * @v: pointer of type atomic_unchecked_t
8697 + * @i: required value
8698 + *
8699 + * Atomically sets the value of @v to @i.
8700 + */
8701 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8702 +{
8703 + v->counter = i;
8704 +}
8705 +
8706 +/**
8707 * atomic_add - add integer to atomic variable
8708 * @i: integer value to add
8709 * @v: pointer of type atomic_t
8710 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
8711 */
8712 static inline void atomic_add(int i, atomic_t *v)
8713 {
8714 - asm volatile(LOCK_PREFIX "addl %1,%0"
8715 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8716 +
8717 +#ifdef CONFIG_PAX_REFCOUNT
8718 + "jno 0f\n"
8719 + LOCK_PREFIX "subl %1,%0\n"
8720 + "int $4\n0:\n"
8721 + _ASM_EXTABLE(0b, 0b)
8722 +#endif
8723 +
8724 + : "+m" (v->counter)
8725 + : "ir" (i));
8726 +}
8727 +
8728 +/**
8729 + * atomic_add_unchecked - add integer to atomic variable
8730 + * @i: integer value to add
8731 + * @v: pointer of type atomic_unchecked_t
8732 + *
8733 + * Atomically adds @i to @v.
8734 + */
8735 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8736 +{
8737 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8738 : "+m" (v->counter)
8739 : "ir" (i));
8740 }
8741 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
8742 */
8743 static inline void atomic_sub(int i, atomic_t *v)
8744 {
8745 - asm volatile(LOCK_PREFIX "subl %1,%0"
8746 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8747 +
8748 +#ifdef CONFIG_PAX_REFCOUNT
8749 + "jno 0f\n"
8750 + LOCK_PREFIX "addl %1,%0\n"
8751 + "int $4\n0:\n"
8752 + _ASM_EXTABLE(0b, 0b)
8753 +#endif
8754 +
8755 + : "+m" (v->counter)
8756 + : "ir" (i));
8757 +}
8758 +
8759 +/**
8760 + * atomic_sub_unchecked - subtract integer from atomic variable
8761 + * @i: integer value to subtract
8762 + * @v: pointer of type atomic_unchecked_t
8763 + *
8764 + * Atomically subtracts @i from @v.
8765 + */
8766 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8767 +{
8768 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8769 : "+m" (v->counter)
8770 : "ir" (i));
8771 }
8772 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8773 {
8774 unsigned char c;
8775
8776 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8777 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8778 +
8779 +#ifdef CONFIG_PAX_REFCOUNT
8780 + "jno 0f\n"
8781 + LOCK_PREFIX "addl %2,%0\n"
8782 + "int $4\n0:\n"
8783 + _ASM_EXTABLE(0b, 0b)
8784 +#endif
8785 +
8786 + "sete %1\n"
8787 : "+m" (v->counter), "=qm" (c)
8788 : "ir" (i) : "memory");
8789 return c;
8790 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8791 */
8792 static inline void atomic_inc(atomic_t *v)
8793 {
8794 - asm volatile(LOCK_PREFIX "incl %0"
8795 + asm volatile(LOCK_PREFIX "incl %0\n"
8796 +
8797 +#ifdef CONFIG_PAX_REFCOUNT
8798 + "jno 0f\n"
8799 + LOCK_PREFIX "decl %0\n"
8800 + "int $4\n0:\n"
8801 + _ASM_EXTABLE(0b, 0b)
8802 +#endif
8803 +
8804 + : "+m" (v->counter));
8805 +}
8806 +
8807 +/**
8808 + * atomic_inc_unchecked - increment atomic variable
8809 + * @v: pointer of type atomic_unchecked_t
8810 + *
8811 + * Atomically increments @v by 1.
8812 + */
8813 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8814 +{
8815 + asm volatile(LOCK_PREFIX "incl %0\n"
8816 : "+m" (v->counter));
8817 }
8818
8819 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
8820 */
8821 static inline void atomic_dec(atomic_t *v)
8822 {
8823 - asm volatile(LOCK_PREFIX "decl %0"
8824 + asm volatile(LOCK_PREFIX "decl %0\n"
8825 +
8826 +#ifdef CONFIG_PAX_REFCOUNT
8827 + "jno 0f\n"
8828 + LOCK_PREFIX "incl %0\n"
8829 + "int $4\n0:\n"
8830 + _ASM_EXTABLE(0b, 0b)
8831 +#endif
8832 +
8833 + : "+m" (v->counter));
8834 +}
8835 +
8836 +/**
8837 + * atomic_dec_unchecked - decrement atomic variable
8838 + * @v: pointer of type atomic_unchecked_t
8839 + *
8840 + * Atomically decrements @v by 1.
8841 + */
8842 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8843 +{
8844 + asm volatile(LOCK_PREFIX "decl %0\n"
8845 : "+m" (v->counter));
8846 }
8847
8848 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8849 {
8850 unsigned char c;
8851
8852 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8853 + asm volatile(LOCK_PREFIX "decl %0\n"
8854 +
8855 +#ifdef CONFIG_PAX_REFCOUNT
8856 + "jno 0f\n"
8857 + LOCK_PREFIX "incl %0\n"
8858 + "int $4\n0:\n"
8859 + _ASM_EXTABLE(0b, 0b)
8860 +#endif
8861 +
8862 + "sete %1\n"
8863 : "+m" (v->counter), "=qm" (c)
8864 : : "memory");
8865 return c != 0;
8866 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8867 {
8868 unsigned char c;
8869
8870 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8871 + asm volatile(LOCK_PREFIX "incl %0\n"
8872 +
8873 +#ifdef CONFIG_PAX_REFCOUNT
8874 + "jno 0f\n"
8875 + LOCK_PREFIX "decl %0\n"
8876 + "int $4\n0:\n"
8877 + _ASM_EXTABLE(0b, 0b)
8878 +#endif
8879 +
8880 + "sete %1\n"
8881 + : "+m" (v->counter), "=qm" (c)
8882 + : : "memory");
8883 + return c != 0;
8884 +}
8885 +
8886 +/**
8887 + * atomic_inc_and_test_unchecked - increment and test
8888 + * @v: pointer of type atomic_unchecked_t
8889 + *
8890 + * Atomically increments @v by 1
8891 + * and returns true if the result is zero, or false for all
8892 + * other cases.
8893 + */
8894 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8895 +{
8896 + unsigned char c;
8897 +
8898 + asm volatile(LOCK_PREFIX "incl %0\n"
8899 + "sete %1\n"
8900 : "+m" (v->counter), "=qm" (c)
8901 : : "memory");
8902 return c != 0;
8903 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8904 {
8905 unsigned char c;
8906
8907 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8908 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8909 +
8910 +#ifdef CONFIG_PAX_REFCOUNT
8911 + "jno 0f\n"
8912 + LOCK_PREFIX "subl %2,%0\n"
8913 + "int $4\n0:\n"
8914 + _ASM_EXTABLE(0b, 0b)
8915 +#endif
8916 +
8917 + "sets %1\n"
8918 : "+m" (v->counter), "=qm" (c)
8919 : "ir" (i) : "memory");
8920 return c;
8921 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
8922 goto no_xadd;
8923 #endif
8924 /* Modern 486+ processor */
8925 - return i + xadd(&v->counter, i);
8926 + return i + xadd_check_overflow(&v->counter, i);
8927
8928 #ifdef CONFIG_M386
8929 no_xadd: /* Legacy 386 processor */
8930 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
8931 }
8932
8933 /**
8934 + * atomic_add_return_unchecked - add integer and return
8935 + * @i: integer value to add
8936 + * @v: pointer of type atomic_unchecked_t
8937 + *
8938 + * Atomically adds @i to @v and returns @i + @v
8939 + */
8940 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8941 +{
8942 +#ifdef CONFIG_M386
8943 + int __i;
8944 + unsigned long flags;
8945 + if (unlikely(boot_cpu_data.x86 <= 3))
8946 + goto no_xadd;
8947 +#endif
8948 + /* Modern 486+ processor */
8949 + return i + xadd(&v->counter, i);
8950 +
8951 +#ifdef CONFIG_M386
8952 +no_xadd: /* Legacy 386 processor */
8953 + raw_local_irq_save(flags);
8954 + __i = atomic_read_unchecked(v);
8955 + atomic_set_unchecked(v, i + __i);
8956 + raw_local_irq_restore(flags);
8957 + return i + __i;
8958 +#endif
8959 +}
8960 +
8961 +/**
8962 * atomic_sub_return - subtract integer and return
8963 * @v: pointer of type atomic_t
8964 * @i: integer value to subtract
8965 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
8966 }
8967
8968 #define atomic_inc_return(v) (atomic_add_return(1, v))
8969 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8970 +{
8971 + return atomic_add_return_unchecked(1, v);
8972 +}
8973 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8974
8975 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8976 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8977 return cmpxchg(&v->counter, old, new);
8978 }
8979
8980 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8981 +{
8982 + return cmpxchg(&v->counter, old, new);
8983 +}
8984 +
8985 static inline int atomic_xchg(atomic_t *v, int new)
8986 {
8987 return xchg(&v->counter, new);
8988 }
8989
8990 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8991 +{
8992 + return xchg(&v->counter, new);
8993 +}
8994 +
8995 /**
8996 * __atomic_add_unless - add unless the number is already a given value
8997 * @v: pointer of type atomic_t
8998 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
8999 */
9000 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9001 {
9002 - int c, old;
9003 + int c, old, new;
9004 c = atomic_read(v);
9005 for (;;) {
9006 - if (unlikely(c == (u)))
9007 + if (unlikely(c == u))
9008 break;
9009 - old = atomic_cmpxchg((v), c, c + (a));
9010 +
9011 + asm volatile("addl %2,%0\n"
9012 +
9013 +#ifdef CONFIG_PAX_REFCOUNT
9014 + "jno 0f\n"
9015 + "subl %2,%0\n"
9016 + "int $4\n0:\n"
9017 + _ASM_EXTABLE(0b, 0b)
9018 +#endif
9019 +
9020 + : "=r" (new)
9021 + : "0" (c), "ir" (a));
9022 +
9023 + old = atomic_cmpxchg(v, c, new);
9024 if (likely(old == c))
9025 break;
9026 c = old;
9027 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9028 return c;
9029 }
9030
9031 +/**
9032 + * atomic_inc_not_zero_hint - increment if not null
9033 + * @v: pointer of type atomic_t
9034 + * @hint: probable value of the atomic before the increment
9035 + *
9036 + * This version of atomic_inc_not_zero() gives a hint of probable
9037 + * value of the atomic. This helps processor to not read the memory
9038 + * before doing the atomic read/modify/write cycle, lowering
9039 + * number of bus transactions on some arches.
9040 + *
9041 + * Returns: 0 if increment was not done, 1 otherwise.
9042 + */
9043 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9044 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9045 +{
9046 + int val, c = hint, new;
9047 +
9048 + /* sanity test, should be removed by compiler if hint is a constant */
9049 + if (!hint)
9050 + return __atomic_add_unless(v, 1, 0);
9051 +
9052 + do {
9053 + asm volatile("incl %0\n"
9054 +
9055 +#ifdef CONFIG_PAX_REFCOUNT
9056 + "jno 0f\n"
9057 + "decl %0\n"
9058 + "int $4\n0:\n"
9059 + _ASM_EXTABLE(0b, 0b)
9060 +#endif
9061 +
9062 + : "=r" (new)
9063 + : "0" (c));
9064 +
9065 + val = atomic_cmpxchg(v, c, new);
9066 + if (val == c)
9067 + return 1;
9068 + c = val;
9069 + } while (c);
9070 +
9071 + return 0;
9072 +}
9073
9074 /*
9075 * atomic_dec_if_positive - decrement by 1 if old value positive
9076 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9077 index 24098aa..1e37723 100644
9078 --- a/arch/x86/include/asm/atomic64_32.h
9079 +++ b/arch/x86/include/asm/atomic64_32.h
9080 @@ -12,6 +12,14 @@ typedef struct {
9081 u64 __aligned(8) counter;
9082 } atomic64_t;
9083
9084 +#ifdef CONFIG_PAX_REFCOUNT
9085 +typedef struct {
9086 + u64 __aligned(8) counter;
9087 +} atomic64_unchecked_t;
9088 +#else
9089 +typedef atomic64_t atomic64_unchecked_t;
9090 +#endif
9091 +
9092 #define ATOMIC64_INIT(val) { (val) }
9093
9094 #ifdef CONFIG_X86_CMPXCHG64
9095 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9096 }
9097
9098 /**
9099 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9100 + * @p: pointer to type atomic64_unchecked_t
9101 + * @o: expected value
9102 + * @n: new value
9103 + *
9104 + * Atomically sets @v to @n if it was equal to @o and returns
9105 + * the old value.
9106 + */
9107 +
9108 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9109 +{
9110 + return cmpxchg64(&v->counter, o, n);
9111 +}
9112 +
9113 +/**
9114 * atomic64_xchg - xchg atomic64 variable
9115 * @v: pointer to type atomic64_t
9116 * @n: value to assign
9117 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9118 }
9119
9120 /**
9121 + * atomic64_set_unchecked - set atomic64 variable
9122 + * @v: pointer to type atomic64_unchecked_t
9123 + * @n: value to assign
9124 + *
9125 + * Atomically sets the value of @v to @n.
9126 + */
9127 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9128 +{
9129 + unsigned high = (unsigned)(i >> 32);
9130 + unsigned low = (unsigned)i;
9131 + asm volatile(ATOMIC64_ALTERNATIVE(set)
9132 + : "+b" (low), "+c" (high)
9133 + : "S" (v)
9134 + : "eax", "edx", "memory"
9135 + );
9136 +}
9137 +
9138 +/**
9139 * atomic64_read - read atomic64 variable
9140 * @v: pointer to type atomic64_t
9141 *
9142 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
9143 }
9144
9145 /**
9146 + * atomic64_read_unchecked - read atomic64 variable
9147 + * @v: pointer to type atomic64_unchecked_t
9148 + *
9149 + * Atomically reads the value of @v and returns it.
9150 + */
9151 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9152 +{
9153 + long long r;
9154 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9155 + : "=A" (r), "+c" (v)
9156 + : : "memory"
9157 + );
9158 + return r;
9159 + }
9160 +
9161 +/**
9162 * atomic64_add_return - add and return
9163 * @i: integer value to add
9164 * @v: pointer to type atomic64_t
9165 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9166 return i;
9167 }
9168
9169 +/**
9170 + * atomic64_add_return_unchecked - add and return
9171 + * @i: integer value to add
9172 + * @v: pointer to type atomic64_unchecked_t
9173 + *
9174 + * Atomically adds @i to @v and returns @i + *@v
9175 + */
9176 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9177 +{
9178 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9179 + : "+A" (i), "+c" (v)
9180 + : : "memory"
9181 + );
9182 + return i;
9183 +}
9184 +
9185 /*
9186 * Other variants with different arithmetic operators:
9187 */
9188 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9189 return a;
9190 }
9191
9192 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9193 +{
9194 + long long a;
9195 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9196 + : "=A" (a)
9197 + : "S" (v)
9198 + : "memory", "ecx"
9199 + );
9200 + return a;
9201 +}
9202 +
9203 static inline long long atomic64_dec_return(atomic64_t *v)
9204 {
9205 long long a;
9206 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9207 }
9208
9209 /**
9210 + * atomic64_add_unchecked - add integer to atomic64 variable
9211 + * @i: integer value to add
9212 + * @v: pointer to type atomic64_unchecked_t
9213 + *
9214 + * Atomically adds @i to @v.
9215 + */
9216 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9217 +{
9218 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9219 + : "+A" (i), "+c" (v)
9220 + : : "memory"
9221 + );
9222 + return i;
9223 +}
9224 +
9225 +/**
9226 * atomic64_sub - subtract the atomic64 variable
9227 * @i: integer value to subtract
9228 * @v: pointer to type atomic64_t
9229 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9230 index 0e1cbfc..5623683 100644
9231 --- a/arch/x86/include/asm/atomic64_64.h
9232 +++ b/arch/x86/include/asm/atomic64_64.h
9233 @@ -18,7 +18,19 @@
9234 */
9235 static inline long atomic64_read(const atomic64_t *v)
9236 {
9237 - return (*(volatile long *)&(v)->counter);
9238 + return (*(volatile const long *)&(v)->counter);
9239 +}
9240 +
9241 +/**
9242 + * atomic64_read_unchecked - read atomic64 variable
9243 + * @v: pointer of type atomic64_unchecked_t
9244 + *
9245 + * Atomically reads the value of @v.
9246 + * Doesn't imply a read memory barrier.
9247 + */
9248 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9249 +{
9250 + return (*(volatile const long *)&(v)->counter);
9251 }
9252
9253 /**
9254 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9255 }
9256
9257 /**
9258 + * atomic64_set_unchecked - set atomic64 variable
9259 + * @v: pointer to type atomic64_unchecked_t
9260 + * @i: required value
9261 + *
9262 + * Atomically sets the value of @v to @i.
9263 + */
9264 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9265 +{
9266 + v->counter = i;
9267 +}
9268 +
9269 +/**
9270 * atomic64_add - add integer to atomic64 variable
9271 * @i: integer value to add
9272 * @v: pointer to type atomic64_t
9273 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9274 */
9275 static inline void atomic64_add(long i, atomic64_t *v)
9276 {
9277 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9278 +
9279 +#ifdef CONFIG_PAX_REFCOUNT
9280 + "jno 0f\n"
9281 + LOCK_PREFIX "subq %1,%0\n"
9282 + "int $4\n0:\n"
9283 + _ASM_EXTABLE(0b, 0b)
9284 +#endif
9285 +
9286 + : "=m" (v->counter)
9287 + : "er" (i), "m" (v->counter));
9288 +}
9289 +
9290 +/**
9291 + * atomic64_add_unchecked - add integer to atomic64 variable
9292 + * @i: integer value to add
9293 + * @v: pointer to type atomic64_unchecked_t
9294 + *
9295 + * Atomically adds @i to @v.
9296 + */
9297 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9298 +{
9299 asm volatile(LOCK_PREFIX "addq %1,%0"
9300 : "=m" (v->counter)
9301 : "er" (i), "m" (v->counter));
9302 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9303 */
9304 static inline void atomic64_sub(long i, atomic64_t *v)
9305 {
9306 - asm volatile(LOCK_PREFIX "subq %1,%0"
9307 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9308 +
9309 +#ifdef CONFIG_PAX_REFCOUNT
9310 + "jno 0f\n"
9311 + LOCK_PREFIX "addq %1,%0\n"
9312 + "int $4\n0:\n"
9313 + _ASM_EXTABLE(0b, 0b)
9314 +#endif
9315 +
9316 + : "=m" (v->counter)
9317 + : "er" (i), "m" (v->counter));
9318 +}
9319 +
9320 +/**
9321 + * atomic64_sub_unchecked - subtract the atomic64 variable
9322 + * @i: integer value to subtract
9323 + * @v: pointer to type atomic64_unchecked_t
9324 + *
9325 + * Atomically subtracts @i from @v.
9326 + */
9327 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9328 +{
9329 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9330 : "=m" (v->counter)
9331 : "er" (i), "m" (v->counter));
9332 }
9333 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9334 {
9335 unsigned char c;
9336
9337 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9338 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9339 +
9340 +#ifdef CONFIG_PAX_REFCOUNT
9341 + "jno 0f\n"
9342 + LOCK_PREFIX "addq %2,%0\n"
9343 + "int $4\n0:\n"
9344 + _ASM_EXTABLE(0b, 0b)
9345 +#endif
9346 +
9347 + "sete %1\n"
9348 : "=m" (v->counter), "=qm" (c)
9349 : "er" (i), "m" (v->counter) : "memory");
9350 return c;
9351 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9352 */
9353 static inline void atomic64_inc(atomic64_t *v)
9354 {
9355 + asm volatile(LOCK_PREFIX "incq %0\n"
9356 +
9357 +#ifdef CONFIG_PAX_REFCOUNT
9358 + "jno 0f\n"
9359 + LOCK_PREFIX "decq %0\n"
9360 + "int $4\n0:\n"
9361 + _ASM_EXTABLE(0b, 0b)
9362 +#endif
9363 +
9364 + : "=m" (v->counter)
9365 + : "m" (v->counter));
9366 +}
9367 +
9368 +/**
9369 + * atomic64_inc_unchecked - increment atomic64 variable
9370 + * @v: pointer to type atomic64_unchecked_t
9371 + *
9372 + * Atomically increments @v by 1.
9373 + */
9374 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9375 +{
9376 asm volatile(LOCK_PREFIX "incq %0"
9377 : "=m" (v->counter)
9378 : "m" (v->counter));
9379 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9380 */
9381 static inline void atomic64_dec(atomic64_t *v)
9382 {
9383 - asm volatile(LOCK_PREFIX "decq %0"
9384 + asm volatile(LOCK_PREFIX "decq %0\n"
9385 +
9386 +#ifdef CONFIG_PAX_REFCOUNT
9387 + "jno 0f\n"
9388 + LOCK_PREFIX "incq %0\n"
9389 + "int $4\n0:\n"
9390 + _ASM_EXTABLE(0b, 0b)
9391 +#endif
9392 +
9393 + : "=m" (v->counter)
9394 + : "m" (v->counter));
9395 +}
9396 +
9397 +/**
9398 + * atomic64_dec_unchecked - decrement atomic64 variable
9399 + * @v: pointer to type atomic64_t
9400 + *
9401 + * Atomically decrements @v by 1.
9402 + */
9403 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9404 +{
9405 + asm volatile(LOCK_PREFIX "decq %0\n"
9406 : "=m" (v->counter)
9407 : "m" (v->counter));
9408 }
9409 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9410 {
9411 unsigned char c;
9412
9413 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9414 + asm volatile(LOCK_PREFIX "decq %0\n"
9415 +
9416 +#ifdef CONFIG_PAX_REFCOUNT
9417 + "jno 0f\n"
9418 + LOCK_PREFIX "incq %0\n"
9419 + "int $4\n0:\n"
9420 + _ASM_EXTABLE(0b, 0b)
9421 +#endif
9422 +
9423 + "sete %1\n"
9424 : "=m" (v->counter), "=qm" (c)
9425 : "m" (v->counter) : "memory");
9426 return c != 0;
9427 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9428 {
9429 unsigned char c;
9430
9431 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9432 + asm volatile(LOCK_PREFIX "incq %0\n"
9433 +
9434 +#ifdef CONFIG_PAX_REFCOUNT
9435 + "jno 0f\n"
9436 + LOCK_PREFIX "decq %0\n"
9437 + "int $4\n0:\n"
9438 + _ASM_EXTABLE(0b, 0b)
9439 +#endif
9440 +
9441 + "sete %1\n"
9442 : "=m" (v->counter), "=qm" (c)
9443 : "m" (v->counter) : "memory");
9444 return c != 0;
9445 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9446 {
9447 unsigned char c;
9448
9449 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9450 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9451 +
9452 +#ifdef CONFIG_PAX_REFCOUNT
9453 + "jno 0f\n"
9454 + LOCK_PREFIX "subq %2,%0\n"
9455 + "int $4\n0:\n"
9456 + _ASM_EXTABLE(0b, 0b)
9457 +#endif
9458 +
9459 + "sets %1\n"
9460 : "=m" (v->counter), "=qm" (c)
9461 : "er" (i), "m" (v->counter) : "memory");
9462 return c;
9463 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9464 */
9465 static inline long atomic64_add_return(long i, atomic64_t *v)
9466 {
9467 + return i + xadd_check_overflow(&v->counter, i);
9468 +}
9469 +
9470 +/**
9471 + * atomic64_add_return_unchecked - add and return
9472 + * @i: integer value to add
9473 + * @v: pointer to type atomic64_unchecked_t
9474 + *
9475 + * Atomically adds @i to @v and returns @i + @v
9476 + */
9477 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9478 +{
9479 return i + xadd(&v->counter, i);
9480 }
9481
9482 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9483 }
9484
9485 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9486 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9487 +{
9488 + return atomic64_add_return_unchecked(1, v);
9489 +}
9490 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9491
9492 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9493 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9494 return cmpxchg(&v->counter, old, new);
9495 }
9496
9497 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9498 +{
9499 + return cmpxchg(&v->counter, old, new);
9500 +}
9501 +
9502 static inline long atomic64_xchg(atomic64_t *v, long new)
9503 {
9504 return xchg(&v->counter, new);
9505 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9506 */
9507 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9508 {
9509 - long c, old;
9510 + long c, old, new;
9511 c = atomic64_read(v);
9512 for (;;) {
9513 - if (unlikely(c == (u)))
9514 + if (unlikely(c == u))
9515 break;
9516 - old = atomic64_cmpxchg((v), c, c + (a));
9517 +
9518 + asm volatile("add %2,%0\n"
9519 +
9520 +#ifdef CONFIG_PAX_REFCOUNT
9521 + "jno 0f\n"
9522 + "sub %2,%0\n"
9523 + "int $4\n0:\n"
9524 + _ASM_EXTABLE(0b, 0b)
9525 +#endif
9526 +
9527 + : "=r" (new)
9528 + : "0" (c), "ir" (a));
9529 +
9530 + old = atomic64_cmpxchg(v, c, new);
9531 if (likely(old == c))
9532 break;
9533 c = old;
9534 }
9535 - return c != (u);
9536 + return c != u;
9537 }
9538
9539 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9540 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9541 index 1775d6e..b65017f 100644
9542 --- a/arch/x86/include/asm/bitops.h
9543 +++ b/arch/x86/include/asm/bitops.h
9544 @@ -38,7 +38,7 @@
9545 * a mask operation on a byte.
9546 */
9547 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9548 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9549 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9550 #define CONST_MASK(nr) (1 << ((nr) & 7))
9551
9552 /**
9553 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9554 index 5e1a2ee..c9f9533 100644
9555 --- a/arch/x86/include/asm/boot.h
9556 +++ b/arch/x86/include/asm/boot.h
9557 @@ -11,10 +11,15 @@
9558 #include <asm/pgtable_types.h>
9559
9560 /* Physical address where kernel should be loaded. */
9561 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9562 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9563 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9564 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9565
9566 +#ifndef __ASSEMBLY__
9567 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9568 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9569 +#endif
9570 +
9571 /* Minimum kernel alignment, as a power of two */
9572 #ifdef CONFIG_X86_64
9573 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9574 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9575 index 48f99f1..d78ebf9 100644
9576 --- a/arch/x86/include/asm/cache.h
9577 +++ b/arch/x86/include/asm/cache.h
9578 @@ -5,12 +5,13 @@
9579
9580 /* L1 cache line size */
9581 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9582 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9583 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9584
9585 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9586 +#define __read_only __attribute__((__section__(".data..read_only")))
9587
9588 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9589 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9590 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9591
9592 #ifdef CONFIG_X86_VSMP
9593 #ifdef CONFIG_SMP
9594 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9595 index 4e12668..501d239 100644
9596 --- a/arch/x86/include/asm/cacheflush.h
9597 +++ b/arch/x86/include/asm/cacheflush.h
9598 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9599 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9600
9601 if (pg_flags == _PGMT_DEFAULT)
9602 - return -1;
9603 + return ~0UL;
9604 else if (pg_flags == _PGMT_WC)
9605 return _PAGE_CACHE_WC;
9606 else if (pg_flags == _PGMT_UC_MINUS)
9607 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9608 index 46fc474..b02b0f9 100644
9609 --- a/arch/x86/include/asm/checksum_32.h
9610 +++ b/arch/x86/include/asm/checksum_32.h
9611 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9612 int len, __wsum sum,
9613 int *src_err_ptr, int *dst_err_ptr);
9614
9615 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9616 + int len, __wsum sum,
9617 + int *src_err_ptr, int *dst_err_ptr);
9618 +
9619 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9620 + int len, __wsum sum,
9621 + int *src_err_ptr, int *dst_err_ptr);
9622 +
9623 /*
9624 * Note: when you get a NULL pointer exception here this means someone
9625 * passed in an incorrect kernel address to one of these functions.
9626 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9627 int *err_ptr)
9628 {
9629 might_sleep();
9630 - return csum_partial_copy_generic((__force void *)src, dst,
9631 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9632 len, sum, err_ptr, NULL);
9633 }
9634
9635 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9636 {
9637 might_sleep();
9638 if (access_ok(VERIFY_WRITE, dst, len))
9639 - return csum_partial_copy_generic(src, (__force void *)dst,
9640 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9641 len, sum, NULL, err_ptr);
9642
9643 if (len)
9644 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
9645 index 5d3acdf..6447a02 100644
9646 --- a/arch/x86/include/asm/cmpxchg.h
9647 +++ b/arch/x86/include/asm/cmpxchg.h
9648 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
9649 __compiletime_error("Bad argument size for cmpxchg");
9650 extern void __xadd_wrong_size(void)
9651 __compiletime_error("Bad argument size for xadd");
9652 +extern void __xadd_check_overflow_wrong_size(void)
9653 + __compiletime_error("Bad argument size for xadd_check_overflow");
9654
9655 /*
9656 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
9657 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
9658 __ret; \
9659 })
9660
9661 +#define __xadd_check_overflow(ptr, inc, lock) \
9662 + ({ \
9663 + __typeof__ (*(ptr)) __ret = (inc); \
9664 + switch (sizeof(*(ptr))) { \
9665 + case __X86_CASE_L: \
9666 + asm volatile (lock "xaddl %0, %1\n" \
9667 + "jno 0f\n" \
9668 + "mov %0,%1\n" \
9669 + "int $4\n0:\n" \
9670 + _ASM_EXTABLE(0b, 0b) \
9671 + : "+r" (__ret), "+m" (*(ptr)) \
9672 + : : "memory", "cc"); \
9673 + break; \
9674 + case __X86_CASE_Q: \
9675 + asm volatile (lock "xaddq %q0, %1\n" \
9676 + "jno 0f\n" \
9677 + "mov %0,%1\n" \
9678 + "int $4\n0:\n" \
9679 + _ASM_EXTABLE(0b, 0b) \
9680 + : "+r" (__ret), "+m" (*(ptr)) \
9681 + : : "memory", "cc"); \
9682 + break; \
9683 + default: \
9684 + __xadd_check_overflow_wrong_size(); \
9685 + } \
9686 + __ret; \
9687 + })
9688 +
9689 /*
9690 * xadd() adds "inc" to "*ptr" and atomically returns the previous
9691 * value of "*ptr".
9692 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
9693 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
9694 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
9695
9696 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
9697 +
9698 #endif /* ASM_X86_CMPXCHG_H */
9699 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
9700 index f3444f7..051a196 100644
9701 --- a/arch/x86/include/asm/cpufeature.h
9702 +++ b/arch/x86/include/asm/cpufeature.h
9703 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
9704 ".section .discard,\"aw\",@progbits\n"
9705 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
9706 ".previous\n"
9707 - ".section .altinstr_replacement,\"ax\"\n"
9708 + ".section .altinstr_replacement,\"a\"\n"
9709 "3: movb $1,%0\n"
9710 "4:\n"
9711 ".previous\n"
9712 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9713 index 41935fa..3b40db8 100644
9714 --- a/arch/x86/include/asm/desc.h
9715 +++ b/arch/x86/include/asm/desc.h
9716 @@ -4,6 +4,7 @@
9717 #include <asm/desc_defs.h>
9718 #include <asm/ldt.h>
9719 #include <asm/mmu.h>
9720 +#include <asm/pgtable.h>
9721
9722 #include <linux/smp.h>
9723
9724 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9725
9726 desc->type = (info->read_exec_only ^ 1) << 1;
9727 desc->type |= info->contents << 2;
9728 + desc->type |= info->seg_not_present ^ 1;
9729
9730 desc->s = 1;
9731 desc->dpl = 0x3;
9732 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9733 }
9734
9735 extern struct desc_ptr idt_descr;
9736 -extern gate_desc idt_table[];
9737 -
9738 -struct gdt_page {
9739 - struct desc_struct gdt[GDT_ENTRIES];
9740 -} __attribute__((aligned(PAGE_SIZE)));
9741 -
9742 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9743 +extern gate_desc idt_table[256];
9744
9745 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9746 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9747 {
9748 - return per_cpu(gdt_page, cpu).gdt;
9749 + return cpu_gdt_table[cpu];
9750 }
9751
9752 #ifdef CONFIG_X86_64
9753 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9754 unsigned long base, unsigned dpl, unsigned flags,
9755 unsigned short seg)
9756 {
9757 - gate->a = (seg << 16) | (base & 0xffff);
9758 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9759 + gate->gate.offset_low = base;
9760 + gate->gate.seg = seg;
9761 + gate->gate.reserved = 0;
9762 + gate->gate.type = type;
9763 + gate->gate.s = 0;
9764 + gate->gate.dpl = dpl;
9765 + gate->gate.p = 1;
9766 + gate->gate.offset_high = base >> 16;
9767 }
9768
9769 #endif
9770 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9771
9772 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
9773 {
9774 + pax_open_kernel();
9775 memcpy(&idt[entry], gate, sizeof(*gate));
9776 + pax_close_kernel();
9777 }
9778
9779 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
9780 {
9781 + pax_open_kernel();
9782 memcpy(&ldt[entry], desc, 8);
9783 + pax_close_kernel();
9784 }
9785
9786 static inline void
9787 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
9788 default: size = sizeof(*gdt); break;
9789 }
9790
9791 + pax_open_kernel();
9792 memcpy(&gdt[entry], desc, size);
9793 + pax_close_kernel();
9794 }
9795
9796 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9797 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9798
9799 static inline void native_load_tr_desc(void)
9800 {
9801 + pax_open_kernel();
9802 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9803 + pax_close_kernel();
9804 }
9805
9806 static inline void native_load_gdt(const struct desc_ptr *dtr)
9807 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9808 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9809 unsigned int i;
9810
9811 + pax_open_kernel();
9812 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9813 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9814 + pax_close_kernel();
9815 }
9816
9817 #define _LDT_empty(info) \
9818 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9819 desc->limit = (limit >> 16) & 0xf;
9820 }
9821
9822 -static inline void _set_gate(int gate, unsigned type, void *addr,
9823 +static inline void _set_gate(int gate, unsigned type, const void *addr,
9824 unsigned dpl, unsigned ist, unsigned seg)
9825 {
9826 gate_desc s;
9827 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9828 * Pentium F0 0F bugfix can have resulted in the mapped
9829 * IDT being write-protected.
9830 */
9831 -static inline void set_intr_gate(unsigned int n, void *addr)
9832 +static inline void set_intr_gate(unsigned int n, const void *addr)
9833 {
9834 BUG_ON((unsigned)n > 0xFF);
9835 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9836 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9837 /*
9838 * This routine sets up an interrupt gate at directory privilege level 3.
9839 */
9840 -static inline void set_system_intr_gate(unsigned int n, void *addr)
9841 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
9842 {
9843 BUG_ON((unsigned)n > 0xFF);
9844 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9845 }
9846
9847 -static inline void set_system_trap_gate(unsigned int n, void *addr)
9848 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
9849 {
9850 BUG_ON((unsigned)n > 0xFF);
9851 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9852 }
9853
9854 -static inline void set_trap_gate(unsigned int n, void *addr)
9855 +static inline void set_trap_gate(unsigned int n, const void *addr)
9856 {
9857 BUG_ON((unsigned)n > 0xFF);
9858 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9859 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9860 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9861 {
9862 BUG_ON((unsigned)n > 0xFF);
9863 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9864 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9865 }
9866
9867 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9868 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9869 {
9870 BUG_ON((unsigned)n > 0xFF);
9871 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9872 }
9873
9874 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9875 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9876 {
9877 BUG_ON((unsigned)n > 0xFF);
9878 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9879 }
9880
9881 +#ifdef CONFIG_X86_32
9882 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9883 +{
9884 + struct desc_struct d;
9885 +
9886 + if (likely(limit))
9887 + limit = (limit - 1UL) >> PAGE_SHIFT;
9888 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
9889 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9890 +}
9891 +#endif
9892 +
9893 #endif /* _ASM_X86_DESC_H */
9894 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9895 index 278441f..b95a174 100644
9896 --- a/arch/x86/include/asm/desc_defs.h
9897 +++ b/arch/x86/include/asm/desc_defs.h
9898 @@ -31,6 +31,12 @@ struct desc_struct {
9899 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9900 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9901 };
9902 + struct {
9903 + u16 offset_low;
9904 + u16 seg;
9905 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9906 + unsigned offset_high: 16;
9907 + } gate;
9908 };
9909 } __attribute__((packed));
9910
9911 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9912 index 908b969..a1f4eb4 100644
9913 --- a/arch/x86/include/asm/e820.h
9914 +++ b/arch/x86/include/asm/e820.h
9915 @@ -69,7 +69,7 @@ struct e820map {
9916 #define ISA_START_ADDRESS 0xa0000
9917 #define ISA_END_ADDRESS 0x100000
9918
9919 -#define BIOS_BEGIN 0x000a0000
9920 +#define BIOS_BEGIN 0x000c0000
9921 #define BIOS_END 0x00100000
9922
9923 #define BIOS_ROM_BASE 0xffe00000
9924 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9925 index 5f962df..7289f09 100644
9926 --- a/arch/x86/include/asm/elf.h
9927 +++ b/arch/x86/include/asm/elf.h
9928 @@ -238,7 +238,25 @@ extern int force_personality32;
9929 the loader. We need to make sure that it is out of the way of the program
9930 that it will "exec", and that there is sufficient room for the brk. */
9931
9932 +#ifdef CONFIG_PAX_SEGMEXEC
9933 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9934 +#else
9935 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9936 +#endif
9937 +
9938 +#ifdef CONFIG_PAX_ASLR
9939 +#ifdef CONFIG_X86_32
9940 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9941 +
9942 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9943 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9944 +#else
9945 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
9946 +
9947 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9948 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9949 +#endif
9950 +#endif
9951
9952 /* This yields a mask that user programs can use to figure out what
9953 instruction set this CPU supports. This could be done in user space,
9954 @@ -291,9 +309,7 @@ do { \
9955
9956 #define ARCH_DLINFO \
9957 do { \
9958 - if (vdso_enabled) \
9959 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9960 - (unsigned long)current->mm->context.vdso); \
9961 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9962 } while (0)
9963
9964 #define AT_SYSINFO 32
9965 @@ -304,7 +320,7 @@ do { \
9966
9967 #endif /* !CONFIG_X86_32 */
9968
9969 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9970 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9971
9972 #define VDSO_ENTRY \
9973 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9974 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9975 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9976 #define compat_arch_setup_additional_pages syscall32_setup_pages
9977
9978 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9979 -#define arch_randomize_brk arch_randomize_brk
9980 -
9981 /*
9982 * True on X86_32 or when emulating IA32 on X86_64
9983 */
9984 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
9985 index cc70c1c..d96d011 100644
9986 --- a/arch/x86/include/asm/emergency-restart.h
9987 +++ b/arch/x86/include/asm/emergency-restart.h
9988 @@ -15,6 +15,6 @@ enum reboot_type {
9989
9990 extern enum reboot_type reboot_type;
9991
9992 -extern void machine_emergency_restart(void);
9993 +extern void machine_emergency_restart(void) __noreturn;
9994
9995 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
9996 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
9997 index d09bb03..4ea4194 100644
9998 --- a/arch/x86/include/asm/futex.h
9999 +++ b/arch/x86/include/asm/futex.h
10000 @@ -12,16 +12,18 @@
10001 #include <asm/system.h>
10002
10003 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10004 + typecheck(u32 __user *, uaddr); \
10005 asm volatile("1:\t" insn "\n" \
10006 "2:\t.section .fixup,\"ax\"\n" \
10007 "3:\tmov\t%3, %1\n" \
10008 "\tjmp\t2b\n" \
10009 "\t.previous\n" \
10010 _ASM_EXTABLE(1b, 3b) \
10011 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10012 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10013 : "i" (-EFAULT), "0" (oparg), "1" (0))
10014
10015 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10016 + typecheck(u32 __user *, uaddr); \
10017 asm volatile("1:\tmovl %2, %0\n" \
10018 "\tmovl\t%0, %3\n" \
10019 "\t" insn "\n" \
10020 @@ -34,7 +36,7 @@
10021 _ASM_EXTABLE(1b, 4b) \
10022 _ASM_EXTABLE(2b, 4b) \
10023 : "=&a" (oldval), "=&r" (ret), \
10024 - "+m" (*uaddr), "=&r" (tem) \
10025 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10026 : "r" (oparg), "i" (-EFAULT), "1" (0))
10027
10028 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10029 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10030
10031 switch (op) {
10032 case FUTEX_OP_SET:
10033 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10034 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10035 break;
10036 case FUTEX_OP_ADD:
10037 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10038 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10039 uaddr, oparg);
10040 break;
10041 case FUTEX_OP_OR:
10042 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10043 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10044 return -EFAULT;
10045
10046 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10047 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10048 "2:\t.section .fixup, \"ax\"\n"
10049 "3:\tmov %3, %0\n"
10050 "\tjmp 2b\n"
10051 "\t.previous\n"
10052 _ASM_EXTABLE(1b, 3b)
10053 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10054 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10055 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10056 : "memory"
10057 );
10058 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10059 index eb92a6e..b98b2f4 100644
10060 --- a/arch/x86/include/asm/hw_irq.h
10061 +++ b/arch/x86/include/asm/hw_irq.h
10062 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10063 extern void enable_IO_APIC(void);
10064
10065 /* Statistics */
10066 -extern atomic_t irq_err_count;
10067 -extern atomic_t irq_mis_count;
10068 +extern atomic_unchecked_t irq_err_count;
10069 +extern atomic_unchecked_t irq_mis_count;
10070
10071 /* EISA */
10072 extern void eisa_set_level_irq(unsigned int irq);
10073 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10074 index a850b4d..bae26dc 100644
10075 --- a/arch/x86/include/asm/i387.h
10076 +++ b/arch/x86/include/asm/i387.h
10077 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10078 {
10079 int err;
10080
10081 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10082 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10083 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10084 +#endif
10085 +
10086 /* See comment in fxsave() below. */
10087 #ifdef CONFIG_AS_FXSAVEQ
10088 asm volatile("1: fxrstorq %[fx]\n\t"
10089 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10090 {
10091 int err;
10092
10093 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10094 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10095 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10096 +#endif
10097 +
10098 /*
10099 * Clear the bytes not touched by the fxsave and reserved
10100 * for the SW usage.
10101 @@ -424,7 +434,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10102 static inline bool interrupted_user_mode(void)
10103 {
10104 struct pt_regs *regs = get_irq_regs();
10105 - return regs && user_mode_vm(regs);
10106 + return regs && user_mode(regs);
10107 }
10108
10109 /*
10110 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10111 index d8e8eef..99f81ae 100644
10112 --- a/arch/x86/include/asm/io.h
10113 +++ b/arch/x86/include/asm/io.h
10114 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10115
10116 #include <linux/vmalloc.h>
10117
10118 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10119 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10120 +{
10121 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10122 +}
10123 +
10124 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10125 +{
10126 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10127 +}
10128 +
10129 /*
10130 * Convert a virtual cached pointer to an uncached pointer
10131 */
10132 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10133 index bba3cf8..06bc8da 100644
10134 --- a/arch/x86/include/asm/irqflags.h
10135 +++ b/arch/x86/include/asm/irqflags.h
10136 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10137 sti; \
10138 sysexit
10139
10140 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10141 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10142 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10143 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10144 +
10145 #else
10146 #define INTERRUPT_RETURN iret
10147 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10148 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10149 index 5478825..839e88c 100644
10150 --- a/arch/x86/include/asm/kprobes.h
10151 +++ b/arch/x86/include/asm/kprobes.h
10152 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10153 #define RELATIVEJUMP_SIZE 5
10154 #define RELATIVECALL_OPCODE 0xe8
10155 #define RELATIVE_ADDR_SIZE 4
10156 -#define MAX_STACK_SIZE 64
10157 -#define MIN_STACK_SIZE(ADDR) \
10158 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10159 - THREAD_SIZE - (unsigned long)(ADDR))) \
10160 - ? (MAX_STACK_SIZE) \
10161 - : (((unsigned long)current_thread_info()) + \
10162 - THREAD_SIZE - (unsigned long)(ADDR)))
10163 +#define MAX_STACK_SIZE 64UL
10164 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10165
10166 #define flush_insn_slot(p) do { } while (0)
10167
10168 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10169 index b4973f4..7c4d3fc 100644
10170 --- a/arch/x86/include/asm/kvm_host.h
10171 +++ b/arch/x86/include/asm/kvm_host.h
10172 @@ -459,7 +459,7 @@ struct kvm_arch {
10173 unsigned int n_requested_mmu_pages;
10174 unsigned int n_max_mmu_pages;
10175 unsigned int indirect_shadow_pages;
10176 - atomic_t invlpg_counter;
10177 + atomic_unchecked_t invlpg_counter;
10178 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
10179 /*
10180 * Hash table of struct kvm_mmu_page.
10181 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
10182 int (*check_intercept)(struct kvm_vcpu *vcpu,
10183 struct x86_instruction_info *info,
10184 enum x86_intercept_stage stage);
10185 -};
10186 +} __do_const;
10187
10188 struct kvm_arch_async_pf {
10189 u32 token;
10190 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10191 index 9cdae5d..300d20f 100644
10192 --- a/arch/x86/include/asm/local.h
10193 +++ b/arch/x86/include/asm/local.h
10194 @@ -18,26 +18,58 @@ typedef struct {
10195
10196 static inline void local_inc(local_t *l)
10197 {
10198 - asm volatile(_ASM_INC "%0"
10199 + asm volatile(_ASM_INC "%0\n"
10200 +
10201 +#ifdef CONFIG_PAX_REFCOUNT
10202 + "jno 0f\n"
10203 + _ASM_DEC "%0\n"
10204 + "int $4\n0:\n"
10205 + _ASM_EXTABLE(0b, 0b)
10206 +#endif
10207 +
10208 : "+m" (l->a.counter));
10209 }
10210
10211 static inline void local_dec(local_t *l)
10212 {
10213 - asm volatile(_ASM_DEC "%0"
10214 + asm volatile(_ASM_DEC "%0\n"
10215 +
10216 +#ifdef CONFIG_PAX_REFCOUNT
10217 + "jno 0f\n"
10218 + _ASM_INC "%0\n"
10219 + "int $4\n0:\n"
10220 + _ASM_EXTABLE(0b, 0b)
10221 +#endif
10222 +
10223 : "+m" (l->a.counter));
10224 }
10225
10226 static inline void local_add(long i, local_t *l)
10227 {
10228 - asm volatile(_ASM_ADD "%1,%0"
10229 + asm volatile(_ASM_ADD "%1,%0\n"
10230 +
10231 +#ifdef CONFIG_PAX_REFCOUNT
10232 + "jno 0f\n"
10233 + _ASM_SUB "%1,%0\n"
10234 + "int $4\n0:\n"
10235 + _ASM_EXTABLE(0b, 0b)
10236 +#endif
10237 +
10238 : "+m" (l->a.counter)
10239 : "ir" (i));
10240 }
10241
10242 static inline void local_sub(long i, local_t *l)
10243 {
10244 - asm volatile(_ASM_SUB "%1,%0"
10245 + asm volatile(_ASM_SUB "%1,%0\n"
10246 +
10247 +#ifdef CONFIG_PAX_REFCOUNT
10248 + "jno 0f\n"
10249 + _ASM_ADD "%1,%0\n"
10250 + "int $4\n0:\n"
10251 + _ASM_EXTABLE(0b, 0b)
10252 +#endif
10253 +
10254 : "+m" (l->a.counter)
10255 : "ir" (i));
10256 }
10257 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10258 {
10259 unsigned char c;
10260
10261 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10262 + asm volatile(_ASM_SUB "%2,%0\n"
10263 +
10264 +#ifdef CONFIG_PAX_REFCOUNT
10265 + "jno 0f\n"
10266 + _ASM_ADD "%2,%0\n"
10267 + "int $4\n0:\n"
10268 + _ASM_EXTABLE(0b, 0b)
10269 +#endif
10270 +
10271 + "sete %1\n"
10272 : "+m" (l->a.counter), "=qm" (c)
10273 : "ir" (i) : "memory");
10274 return c;
10275 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10276 {
10277 unsigned char c;
10278
10279 - asm volatile(_ASM_DEC "%0; sete %1"
10280 + asm volatile(_ASM_DEC "%0\n"
10281 +
10282 +#ifdef CONFIG_PAX_REFCOUNT
10283 + "jno 0f\n"
10284 + _ASM_INC "%0\n"
10285 + "int $4\n0:\n"
10286 + _ASM_EXTABLE(0b, 0b)
10287 +#endif
10288 +
10289 + "sete %1\n"
10290 : "+m" (l->a.counter), "=qm" (c)
10291 : : "memory");
10292 return c != 0;
10293 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10294 {
10295 unsigned char c;
10296
10297 - asm volatile(_ASM_INC "%0; sete %1"
10298 + asm volatile(_ASM_INC "%0\n"
10299 +
10300 +#ifdef CONFIG_PAX_REFCOUNT
10301 + "jno 0f\n"
10302 + _ASM_DEC "%0\n"
10303 + "int $4\n0:\n"
10304 + _ASM_EXTABLE(0b, 0b)
10305 +#endif
10306 +
10307 + "sete %1\n"
10308 : "+m" (l->a.counter), "=qm" (c)
10309 : : "memory");
10310 return c != 0;
10311 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10312 {
10313 unsigned char c;
10314
10315 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10316 + asm volatile(_ASM_ADD "%2,%0\n"
10317 +
10318 +#ifdef CONFIG_PAX_REFCOUNT
10319 + "jno 0f\n"
10320 + _ASM_SUB "%2,%0\n"
10321 + "int $4\n0:\n"
10322 + _ASM_EXTABLE(0b, 0b)
10323 +#endif
10324 +
10325 + "sets %1\n"
10326 : "+m" (l->a.counter), "=qm" (c)
10327 : "ir" (i) : "memory");
10328 return c;
10329 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10330 #endif
10331 /* Modern 486+ processor */
10332 __i = i;
10333 - asm volatile(_ASM_XADD "%0, %1;"
10334 + asm volatile(_ASM_XADD "%0, %1\n"
10335 +
10336 +#ifdef CONFIG_PAX_REFCOUNT
10337 + "jno 0f\n"
10338 + _ASM_MOV "%0,%1\n"
10339 + "int $4\n0:\n"
10340 + _ASM_EXTABLE(0b, 0b)
10341 +#endif
10342 +
10343 : "+r" (i), "+m" (l->a.counter)
10344 : : "memory");
10345 return i + __i;
10346 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10347 index 593e51d..fa69c9a 100644
10348 --- a/arch/x86/include/asm/mman.h
10349 +++ b/arch/x86/include/asm/mman.h
10350 @@ -5,4 +5,14 @@
10351
10352 #include <asm-generic/mman.h>
10353
10354 +#ifdef __KERNEL__
10355 +#ifndef __ASSEMBLY__
10356 +#ifdef CONFIG_X86_32
10357 +#define arch_mmap_check i386_mmap_check
10358 +int i386_mmap_check(unsigned long addr, unsigned long len,
10359 + unsigned long flags);
10360 +#endif
10361 +#endif
10362 +#endif
10363 +
10364 #endif /* _ASM_X86_MMAN_H */
10365 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10366 index 5f55e69..e20bfb1 100644
10367 --- a/arch/x86/include/asm/mmu.h
10368 +++ b/arch/x86/include/asm/mmu.h
10369 @@ -9,7 +9,7 @@
10370 * we put the segment information here.
10371 */
10372 typedef struct {
10373 - void *ldt;
10374 + struct desc_struct *ldt;
10375 int size;
10376
10377 #ifdef CONFIG_X86_64
10378 @@ -18,7 +18,19 @@ typedef struct {
10379 #endif
10380
10381 struct mutex lock;
10382 - void *vdso;
10383 + unsigned long vdso;
10384 +
10385 +#ifdef CONFIG_X86_32
10386 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10387 + unsigned long user_cs_base;
10388 + unsigned long user_cs_limit;
10389 +
10390 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10391 + cpumask_t cpu_user_cs_mask;
10392 +#endif
10393 +
10394 +#endif
10395 +#endif
10396 } mm_context_t;
10397
10398 #ifdef CONFIG_SMP
10399 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10400 index 6902152..399f3a2 100644
10401 --- a/arch/x86/include/asm/mmu_context.h
10402 +++ b/arch/x86/include/asm/mmu_context.h
10403 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10404
10405 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10406 {
10407 +
10408 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10409 + unsigned int i;
10410 + pgd_t *pgd;
10411 +
10412 + pax_open_kernel();
10413 + pgd = get_cpu_pgd(smp_processor_id());
10414 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10415 + set_pgd_batched(pgd+i, native_make_pgd(0));
10416 + pax_close_kernel();
10417 +#endif
10418 +
10419 #ifdef CONFIG_SMP
10420 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10421 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10422 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10423 struct task_struct *tsk)
10424 {
10425 unsigned cpu = smp_processor_id();
10426 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10427 + int tlbstate = TLBSTATE_OK;
10428 +#endif
10429
10430 if (likely(prev != next)) {
10431 #ifdef CONFIG_SMP
10432 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10433 + tlbstate = percpu_read(cpu_tlbstate.state);
10434 +#endif
10435 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10436 percpu_write(cpu_tlbstate.active_mm, next);
10437 #endif
10438 cpumask_set_cpu(cpu, mm_cpumask(next));
10439
10440 /* Re-load page tables */
10441 +#ifdef CONFIG_PAX_PER_CPU_PGD
10442 + pax_open_kernel();
10443 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10444 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10445 + pax_close_kernel();
10446 + load_cr3(get_cpu_pgd(cpu));
10447 +#else
10448 load_cr3(next->pgd);
10449 +#endif
10450
10451 /* stop flush ipis for the previous mm */
10452 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10453 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10454 */
10455 if (unlikely(prev->context.ldt != next->context.ldt))
10456 load_LDT_nolock(&next->context);
10457 - }
10458 +
10459 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10460 + if (!(__supported_pte_mask & _PAGE_NX)) {
10461 + smp_mb__before_clear_bit();
10462 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10463 + smp_mb__after_clear_bit();
10464 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10465 + }
10466 +#endif
10467 +
10468 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10469 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10470 + prev->context.user_cs_limit != next->context.user_cs_limit))
10471 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10472 #ifdef CONFIG_SMP
10473 + else if (unlikely(tlbstate != TLBSTATE_OK))
10474 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10475 +#endif
10476 +#endif
10477 +
10478 + }
10479 else {
10480 +
10481 +#ifdef CONFIG_PAX_PER_CPU_PGD
10482 + pax_open_kernel();
10483 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10484 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10485 + pax_close_kernel();
10486 + load_cr3(get_cpu_pgd(cpu));
10487 +#endif
10488 +
10489 +#ifdef CONFIG_SMP
10490 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10491 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10492
10493 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10494 * tlb flush IPI delivery. We must reload CR3
10495 * to make sure to use no freed page tables.
10496 */
10497 +
10498 +#ifndef CONFIG_PAX_PER_CPU_PGD
10499 load_cr3(next->pgd);
10500 +#endif
10501 +
10502 load_LDT_nolock(&next->context);
10503 +
10504 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10505 + if (!(__supported_pte_mask & _PAGE_NX))
10506 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10507 +#endif
10508 +
10509 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10510 +#ifdef CONFIG_PAX_PAGEEXEC
10511 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10512 +#endif
10513 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10514 +#endif
10515 +
10516 }
10517 +#endif
10518 }
10519 -#endif
10520 }
10521
10522 #define activate_mm(prev, next) \
10523 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10524 index 9eae775..c914fea 100644
10525 --- a/arch/x86/include/asm/module.h
10526 +++ b/arch/x86/include/asm/module.h
10527 @@ -5,6 +5,7 @@
10528
10529 #ifdef CONFIG_X86_64
10530 /* X86_64 does not define MODULE_PROC_FAMILY */
10531 +#define MODULE_PROC_FAMILY ""
10532 #elif defined CONFIG_M386
10533 #define MODULE_PROC_FAMILY "386 "
10534 #elif defined CONFIG_M486
10535 @@ -59,8 +60,20 @@
10536 #error unknown processor family
10537 #endif
10538
10539 -#ifdef CONFIG_X86_32
10540 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10541 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10542 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10543 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10544 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10545 +#else
10546 +#define MODULE_PAX_KERNEXEC ""
10547 #endif
10548
10549 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10550 +#define MODULE_PAX_UDEREF "UDEREF "
10551 +#else
10552 +#define MODULE_PAX_UDEREF ""
10553 +#endif
10554 +
10555 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10556 +
10557 #endif /* _ASM_X86_MODULE_H */
10558 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10559 index 7639dbf..e08a58c 100644
10560 --- a/arch/x86/include/asm/page_64_types.h
10561 +++ b/arch/x86/include/asm/page_64_types.h
10562 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10563
10564 /* duplicated to the one in bootmem.h */
10565 extern unsigned long max_pfn;
10566 -extern unsigned long phys_base;
10567 +extern const unsigned long phys_base;
10568
10569 extern unsigned long __phys_addr(unsigned long);
10570 #define __phys_reloc_hide(x) (x)
10571 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10572 index a7d2db9..edb023e 100644
10573 --- a/arch/x86/include/asm/paravirt.h
10574 +++ b/arch/x86/include/asm/paravirt.h
10575 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10576 val);
10577 }
10578
10579 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10580 +{
10581 + pgdval_t val = native_pgd_val(pgd);
10582 +
10583 + if (sizeof(pgdval_t) > sizeof(long))
10584 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10585 + val, (u64)val >> 32);
10586 + else
10587 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10588 + val);
10589 +}
10590 +
10591 static inline void pgd_clear(pgd_t *pgdp)
10592 {
10593 set_pgd(pgdp, __pgd(0));
10594 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10595 pv_mmu_ops.set_fixmap(idx, phys, flags);
10596 }
10597
10598 +#ifdef CONFIG_PAX_KERNEXEC
10599 +static inline unsigned long pax_open_kernel(void)
10600 +{
10601 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10602 +}
10603 +
10604 +static inline unsigned long pax_close_kernel(void)
10605 +{
10606 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10607 +}
10608 +#else
10609 +static inline unsigned long pax_open_kernel(void) { return 0; }
10610 +static inline unsigned long pax_close_kernel(void) { return 0; }
10611 +#endif
10612 +
10613 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10614
10615 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10616 @@ -964,7 +991,7 @@ extern void default_banner(void);
10617
10618 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10619 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10620 -#define PARA_INDIRECT(addr) *%cs:addr
10621 +#define PARA_INDIRECT(addr) *%ss:addr
10622 #endif
10623
10624 #define INTERRUPT_RETURN \
10625 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
10626 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10627 CLBR_NONE, \
10628 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10629 +
10630 +#define GET_CR0_INTO_RDI \
10631 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10632 + mov %rax,%rdi
10633 +
10634 +#define SET_RDI_INTO_CR0 \
10635 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10636 +
10637 +#define GET_CR3_INTO_RDI \
10638 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10639 + mov %rax,%rdi
10640 +
10641 +#define SET_RDI_INTO_CR3 \
10642 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10643 +
10644 #endif /* CONFIG_X86_32 */
10645
10646 #endif /* __ASSEMBLY__ */
10647 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10648 index 8e8b9a4..f07d725 100644
10649 --- a/arch/x86/include/asm/paravirt_types.h
10650 +++ b/arch/x86/include/asm/paravirt_types.h
10651 @@ -84,20 +84,20 @@ struct pv_init_ops {
10652 */
10653 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10654 unsigned long addr, unsigned len);
10655 -};
10656 +} __no_const;
10657
10658
10659 struct pv_lazy_ops {
10660 /* Set deferred update mode, used for batching operations. */
10661 void (*enter)(void);
10662 void (*leave)(void);
10663 -};
10664 +} __no_const;
10665
10666 struct pv_time_ops {
10667 unsigned long long (*sched_clock)(void);
10668 unsigned long long (*steal_clock)(int cpu);
10669 unsigned long (*get_tsc_khz)(void);
10670 -};
10671 +} __no_const;
10672
10673 struct pv_cpu_ops {
10674 /* hooks for various privileged instructions */
10675 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
10676
10677 void (*start_context_switch)(struct task_struct *prev);
10678 void (*end_context_switch)(struct task_struct *next);
10679 -};
10680 +} __no_const;
10681
10682 struct pv_irq_ops {
10683 /*
10684 @@ -224,7 +224,7 @@ struct pv_apic_ops {
10685 unsigned long start_eip,
10686 unsigned long start_esp);
10687 #endif
10688 -};
10689 +} __no_const;
10690
10691 struct pv_mmu_ops {
10692 unsigned long (*read_cr2)(void);
10693 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
10694 struct paravirt_callee_save make_pud;
10695
10696 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10697 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10698 #endif /* PAGETABLE_LEVELS == 4 */
10699 #endif /* PAGETABLE_LEVELS >= 3 */
10700
10701 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
10702 an mfn. We can tell which is which from the index. */
10703 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10704 phys_addr_t phys, pgprot_t flags);
10705 +
10706 +#ifdef CONFIG_PAX_KERNEXEC
10707 + unsigned long (*pax_open_kernel)(void);
10708 + unsigned long (*pax_close_kernel)(void);
10709 +#endif
10710 +
10711 };
10712
10713 struct arch_spinlock;
10714 @@ -334,7 +341,7 @@ struct pv_lock_ops {
10715 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
10716 int (*spin_trylock)(struct arch_spinlock *lock);
10717 void (*spin_unlock)(struct arch_spinlock *lock);
10718 -};
10719 +} __no_const;
10720
10721 /* This contains all the paravirt structures: we get a convenient
10722 * number for each function using the offset which we use to indicate
10723 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10724 index b4389a4..b7ff22c 100644
10725 --- a/arch/x86/include/asm/pgalloc.h
10726 +++ b/arch/x86/include/asm/pgalloc.h
10727 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10728 pmd_t *pmd, pte_t *pte)
10729 {
10730 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10731 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10732 +}
10733 +
10734 +static inline void pmd_populate_user(struct mm_struct *mm,
10735 + pmd_t *pmd, pte_t *pte)
10736 +{
10737 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10738 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10739 }
10740
10741 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10742 index 98391db..8f6984e 100644
10743 --- a/arch/x86/include/asm/pgtable-2level.h
10744 +++ b/arch/x86/include/asm/pgtable-2level.h
10745 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10746
10747 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10748 {
10749 + pax_open_kernel();
10750 *pmdp = pmd;
10751 + pax_close_kernel();
10752 }
10753
10754 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10755 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10756 index effff47..f9e4035 100644
10757 --- a/arch/x86/include/asm/pgtable-3level.h
10758 +++ b/arch/x86/include/asm/pgtable-3level.h
10759 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10760
10761 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10762 {
10763 + pax_open_kernel();
10764 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10765 + pax_close_kernel();
10766 }
10767
10768 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10769 {
10770 + pax_open_kernel();
10771 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10772 + pax_close_kernel();
10773 }
10774
10775 /*
10776 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10777 index 18601c8..3d716d1 100644
10778 --- a/arch/x86/include/asm/pgtable.h
10779 +++ b/arch/x86/include/asm/pgtable.h
10780 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10781
10782 #ifndef __PAGETABLE_PUD_FOLDED
10783 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10784 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10785 #define pgd_clear(pgd) native_pgd_clear(pgd)
10786 #endif
10787
10788 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10789
10790 #define arch_end_context_switch(prev) do {} while(0)
10791
10792 +#define pax_open_kernel() native_pax_open_kernel()
10793 +#define pax_close_kernel() native_pax_close_kernel()
10794 #endif /* CONFIG_PARAVIRT */
10795
10796 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
10797 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10798 +
10799 +#ifdef CONFIG_PAX_KERNEXEC
10800 +static inline unsigned long native_pax_open_kernel(void)
10801 +{
10802 + unsigned long cr0;
10803 +
10804 + preempt_disable();
10805 + barrier();
10806 + cr0 = read_cr0() ^ X86_CR0_WP;
10807 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
10808 + write_cr0(cr0);
10809 + return cr0 ^ X86_CR0_WP;
10810 +}
10811 +
10812 +static inline unsigned long native_pax_close_kernel(void)
10813 +{
10814 + unsigned long cr0;
10815 +
10816 + cr0 = read_cr0() ^ X86_CR0_WP;
10817 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10818 + write_cr0(cr0);
10819 + barrier();
10820 + preempt_enable_no_resched();
10821 + return cr0 ^ X86_CR0_WP;
10822 +}
10823 +#else
10824 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
10825 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
10826 +#endif
10827 +
10828 /*
10829 * The following only work if pte_present() is true.
10830 * Undefined behaviour if not..
10831 */
10832 +static inline int pte_user(pte_t pte)
10833 +{
10834 + return pte_val(pte) & _PAGE_USER;
10835 +}
10836 +
10837 static inline int pte_dirty(pte_t pte)
10838 {
10839 return pte_flags(pte) & _PAGE_DIRTY;
10840 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
10841 return pte_clear_flags(pte, _PAGE_RW);
10842 }
10843
10844 +static inline pte_t pte_mkread(pte_t pte)
10845 +{
10846 + return __pte(pte_val(pte) | _PAGE_USER);
10847 +}
10848 +
10849 static inline pte_t pte_mkexec(pte_t pte)
10850 {
10851 - return pte_clear_flags(pte, _PAGE_NX);
10852 +#ifdef CONFIG_X86_PAE
10853 + if (__supported_pte_mask & _PAGE_NX)
10854 + return pte_clear_flags(pte, _PAGE_NX);
10855 + else
10856 +#endif
10857 + return pte_set_flags(pte, _PAGE_USER);
10858 +}
10859 +
10860 +static inline pte_t pte_exprotect(pte_t pte)
10861 +{
10862 +#ifdef CONFIG_X86_PAE
10863 + if (__supported_pte_mask & _PAGE_NX)
10864 + return pte_set_flags(pte, _PAGE_NX);
10865 + else
10866 +#endif
10867 + return pte_clear_flags(pte, _PAGE_USER);
10868 }
10869
10870 static inline pte_t pte_mkdirty(pte_t pte)
10871 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
10872 #endif
10873
10874 #ifndef __ASSEMBLY__
10875 +
10876 +#ifdef CONFIG_PAX_PER_CPU_PGD
10877 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
10878 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
10879 +{
10880 + return cpu_pgd[cpu];
10881 +}
10882 +#endif
10883 +
10884 #include <linux/mm_types.h>
10885
10886 static inline int pte_none(pte_t pte)
10887 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10888
10889 static inline int pgd_bad(pgd_t pgd)
10890 {
10891 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
10892 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
10893 }
10894
10895 static inline int pgd_none(pgd_t pgd)
10896 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
10897 * pgd_offset() returns a (pgd_t *)
10898 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10899 */
10900 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
10901 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
10902 +
10903 +#ifdef CONFIG_PAX_PER_CPU_PGD
10904 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
10905 +#endif
10906 +
10907 /*
10908 * a shortcut which implies the use of the kernel's pgd, instead
10909 * of a process's
10910 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
10911 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10912 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10913
10914 +#ifdef CONFIG_X86_32
10915 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
10916 +#else
10917 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
10918 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
10919 +
10920 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10921 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
10922 +#else
10923 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
10924 +#endif
10925 +
10926 +#endif
10927 +
10928 #ifndef __ASSEMBLY__
10929
10930 extern int direct_gbpages;
10931 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10932 * dst and src can be on the same page, but the range must not overlap,
10933 * and must not cross a page boundary.
10934 */
10935 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
10936 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
10937 {
10938 - memcpy(dst, src, count * sizeof(pgd_t));
10939 + pax_open_kernel();
10940 + while (count--)
10941 + *dst++ = *src++;
10942 + pax_close_kernel();
10943 }
10944
10945 +#ifdef CONFIG_PAX_PER_CPU_PGD
10946 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10947 +#endif
10948 +
10949 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10950 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10951 +#else
10952 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
10953 +#endif
10954
10955 #include <asm-generic/pgtable.h>
10956 #endif /* __ASSEMBLY__ */
10957 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
10958 index 0c92113..34a77c6 100644
10959 --- a/arch/x86/include/asm/pgtable_32.h
10960 +++ b/arch/x86/include/asm/pgtable_32.h
10961 @@ -25,9 +25,6 @@
10962 struct mm_struct;
10963 struct vm_area_struct;
10964
10965 -extern pgd_t swapper_pg_dir[1024];
10966 -extern pgd_t initial_page_table[1024];
10967 -
10968 static inline void pgtable_cache_init(void) { }
10969 static inline void check_pgt_cache(void) { }
10970 void paging_init(void);
10971 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10972 # include <asm/pgtable-2level.h>
10973 #endif
10974
10975 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
10976 +extern pgd_t initial_page_table[PTRS_PER_PGD];
10977 +#ifdef CONFIG_X86_PAE
10978 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
10979 +#endif
10980 +
10981 #if defined(CONFIG_HIGHPTE)
10982 #define pte_offset_map(dir, address) \
10983 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
10984 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10985 /* Clear a kernel PTE and flush it from the TLB */
10986 #define kpte_clear_flush(ptep, vaddr) \
10987 do { \
10988 + pax_open_kernel(); \
10989 pte_clear(&init_mm, (vaddr), (ptep)); \
10990 + pax_close_kernel(); \
10991 __flush_tlb_one((vaddr)); \
10992 } while (0)
10993
10994 @@ -74,6 +79,9 @@ do { \
10995
10996 #endif /* !__ASSEMBLY__ */
10997
10998 +#define HAVE_ARCH_UNMAPPED_AREA
10999 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11000 +
11001 /*
11002 * kern_addr_valid() is (1) for FLATMEM and (0) for
11003 * SPARSEMEM and DISCONTIGMEM
11004 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11005 index ed5903b..c7fe163 100644
11006 --- a/arch/x86/include/asm/pgtable_32_types.h
11007 +++ b/arch/x86/include/asm/pgtable_32_types.h
11008 @@ -8,7 +8,7 @@
11009 */
11010 #ifdef CONFIG_X86_PAE
11011 # include <asm/pgtable-3level_types.h>
11012 -# define PMD_SIZE (1UL << PMD_SHIFT)
11013 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11014 # define PMD_MASK (~(PMD_SIZE - 1))
11015 #else
11016 # include <asm/pgtable-2level_types.h>
11017 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11018 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11019 #endif
11020
11021 +#ifdef CONFIG_PAX_KERNEXEC
11022 +#ifndef __ASSEMBLY__
11023 +extern unsigned char MODULES_EXEC_VADDR[];
11024 +extern unsigned char MODULES_EXEC_END[];
11025 +#endif
11026 +#include <asm/boot.h>
11027 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11028 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11029 +#else
11030 +#define ktla_ktva(addr) (addr)
11031 +#define ktva_ktla(addr) (addr)
11032 +#endif
11033 +
11034 #define MODULES_VADDR VMALLOC_START
11035 #define MODULES_END VMALLOC_END
11036 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11037 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11038 index 975f709..107976d 100644
11039 --- a/arch/x86/include/asm/pgtable_64.h
11040 +++ b/arch/x86/include/asm/pgtable_64.h
11041 @@ -16,10 +16,14 @@
11042
11043 extern pud_t level3_kernel_pgt[512];
11044 extern pud_t level3_ident_pgt[512];
11045 +extern pud_t level3_vmalloc_start_pgt[512];
11046 +extern pud_t level3_vmalloc_end_pgt[512];
11047 +extern pud_t level3_vmemmap_pgt[512];
11048 +extern pud_t level2_vmemmap_pgt[512];
11049 extern pmd_t level2_kernel_pgt[512];
11050 extern pmd_t level2_fixmap_pgt[512];
11051 -extern pmd_t level2_ident_pgt[512];
11052 -extern pgd_t init_level4_pgt[];
11053 +extern pmd_t level2_ident_pgt[512*2];
11054 +extern pgd_t init_level4_pgt[512];
11055
11056 #define swapper_pg_dir init_level4_pgt
11057
11058 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11059
11060 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11061 {
11062 + pax_open_kernel();
11063 *pmdp = pmd;
11064 + pax_close_kernel();
11065 }
11066
11067 static inline void native_pmd_clear(pmd_t *pmd)
11068 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
11069
11070 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11071 {
11072 + pax_open_kernel();
11073 + *pgdp = pgd;
11074 + pax_close_kernel();
11075 +}
11076 +
11077 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11078 +{
11079 *pgdp = pgd;
11080 }
11081
11082 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11083 index 766ea16..5b96cb3 100644
11084 --- a/arch/x86/include/asm/pgtable_64_types.h
11085 +++ b/arch/x86/include/asm/pgtable_64_types.h
11086 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11087 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11088 #define MODULES_END _AC(0xffffffffff000000, UL)
11089 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11090 +#define MODULES_EXEC_VADDR MODULES_VADDR
11091 +#define MODULES_EXEC_END MODULES_END
11092 +
11093 +#define ktla_ktva(addr) (addr)
11094 +#define ktva_ktla(addr) (addr)
11095
11096 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11097 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11098 index 013286a..8b42f4f 100644
11099 --- a/arch/x86/include/asm/pgtable_types.h
11100 +++ b/arch/x86/include/asm/pgtable_types.h
11101 @@ -16,13 +16,12 @@
11102 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11103 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11104 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11105 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11106 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11107 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11108 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11109 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11110 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11111 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11112 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11113 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11114 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11115 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11116
11117 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11118 @@ -40,7 +39,6 @@
11119 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11120 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11121 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11122 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11123 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11124 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11125 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11126 @@ -57,8 +55,10 @@
11127
11128 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11129 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11130 -#else
11131 +#elif defined(CONFIG_KMEMCHECK)
11132 #define _PAGE_NX (_AT(pteval_t, 0))
11133 +#else
11134 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11135 #endif
11136
11137 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11138 @@ -96,6 +96,9 @@
11139 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11140 _PAGE_ACCESSED)
11141
11142 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11143 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11144 +
11145 #define __PAGE_KERNEL_EXEC \
11146 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11147 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11148 @@ -106,7 +109,7 @@
11149 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11150 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11151 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11152 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11153 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11154 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11155 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11156 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11157 @@ -168,8 +171,8 @@
11158 * bits are combined, this will alow user to access the high address mapped
11159 * VDSO in the presence of CONFIG_COMPAT_VDSO
11160 */
11161 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11162 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11163 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11164 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11165 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11166 #endif
11167
11168 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11169 {
11170 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11171 }
11172 +#endif
11173
11174 +#if PAGETABLE_LEVELS == 3
11175 +#include <asm-generic/pgtable-nopud.h>
11176 +#endif
11177 +
11178 +#if PAGETABLE_LEVELS == 2
11179 +#include <asm-generic/pgtable-nopmd.h>
11180 +#endif
11181 +
11182 +#ifndef __ASSEMBLY__
11183 #if PAGETABLE_LEVELS > 3
11184 typedef struct { pudval_t pud; } pud_t;
11185
11186 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11187 return pud.pud;
11188 }
11189 #else
11190 -#include <asm-generic/pgtable-nopud.h>
11191 -
11192 static inline pudval_t native_pud_val(pud_t pud)
11193 {
11194 return native_pgd_val(pud.pgd);
11195 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11196 return pmd.pmd;
11197 }
11198 #else
11199 -#include <asm-generic/pgtable-nopmd.h>
11200 -
11201 static inline pmdval_t native_pmd_val(pmd_t pmd)
11202 {
11203 return native_pgd_val(pmd.pud.pgd);
11204 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11205
11206 extern pteval_t __supported_pte_mask;
11207 extern void set_nx(void);
11208 -extern int nx_enabled;
11209
11210 #define pgprot_writecombine pgprot_writecombine
11211 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11212 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11213 index bb3ee36..781a6b8 100644
11214 --- a/arch/x86/include/asm/processor.h
11215 +++ b/arch/x86/include/asm/processor.h
11216 @@ -268,7 +268,7 @@ struct tss_struct {
11217
11218 } ____cacheline_aligned;
11219
11220 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11221 +extern struct tss_struct init_tss[NR_CPUS];
11222
11223 /*
11224 * Save the original ist values for checking stack pointers during debugging
11225 @@ -861,11 +861,18 @@ static inline void spin_lock_prefetch(const void *x)
11226 */
11227 #define TASK_SIZE PAGE_OFFSET
11228 #define TASK_SIZE_MAX TASK_SIZE
11229 +
11230 +#ifdef CONFIG_PAX_SEGMEXEC
11231 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11232 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11233 +#else
11234 #define STACK_TOP TASK_SIZE
11235 -#define STACK_TOP_MAX STACK_TOP
11236 +#endif
11237 +
11238 +#define STACK_TOP_MAX TASK_SIZE
11239
11240 #define INIT_THREAD { \
11241 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11242 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11243 .vm86_info = NULL, \
11244 .sysenter_cs = __KERNEL_CS, \
11245 .io_bitmap_ptr = NULL, \
11246 @@ -879,7 +886,7 @@ static inline void spin_lock_prefetch(const void *x)
11247 */
11248 #define INIT_TSS { \
11249 .x86_tss = { \
11250 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11251 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11252 .ss0 = __KERNEL_DS, \
11253 .ss1 = __KERNEL_CS, \
11254 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11255 @@ -890,11 +897,7 @@ static inline void spin_lock_prefetch(const void *x)
11256 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11257
11258 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11259 -#define KSTK_TOP(info) \
11260 -({ \
11261 - unsigned long *__ptr = (unsigned long *)(info); \
11262 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11263 -})
11264 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11265
11266 /*
11267 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11268 @@ -909,7 +912,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11269 #define task_pt_regs(task) \
11270 ({ \
11271 struct pt_regs *__regs__; \
11272 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11273 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11274 __regs__ - 1; \
11275 })
11276
11277 @@ -919,13 +922,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11278 /*
11279 * User space process size. 47bits minus one guard page.
11280 */
11281 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11282 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11283
11284 /* This decides where the kernel will search for a free chunk of vm
11285 * space during mmap's.
11286 */
11287 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11288 - 0xc0000000 : 0xFFFFe000)
11289 + 0xc0000000 : 0xFFFFf000)
11290
11291 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11292 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11293 @@ -936,11 +939,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11294 #define STACK_TOP_MAX TASK_SIZE_MAX
11295
11296 #define INIT_THREAD { \
11297 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11298 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11299 }
11300
11301 #define INIT_TSS { \
11302 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11303 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11304 }
11305
11306 /*
11307 @@ -962,6 +965,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11308 */
11309 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11310
11311 +#ifdef CONFIG_PAX_SEGMEXEC
11312 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11313 +#endif
11314 +
11315 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11316
11317 /* Get/set a process' ability to use the timestamp counter instruction */
11318 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11319 index 3566454..4bdfb8c 100644
11320 --- a/arch/x86/include/asm/ptrace.h
11321 +++ b/arch/x86/include/asm/ptrace.h
11322 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11323 }
11324
11325 /*
11326 - * user_mode_vm(regs) determines whether a register set came from user mode.
11327 + * user_mode(regs) determines whether a register set came from user mode.
11328 * This is true if V8086 mode was enabled OR if the register set was from
11329 * protected mode with RPL-3 CS value. This tricky test checks that with
11330 * one comparison. Many places in the kernel can bypass this full check
11331 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11332 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11333 + * be used.
11334 */
11335 -static inline int user_mode(struct pt_regs *regs)
11336 +static inline int user_mode_novm(struct pt_regs *regs)
11337 {
11338 #ifdef CONFIG_X86_32
11339 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11340 #else
11341 - return !!(regs->cs & 3);
11342 + return !!(regs->cs & SEGMENT_RPL_MASK);
11343 #endif
11344 }
11345
11346 -static inline int user_mode_vm(struct pt_regs *regs)
11347 +static inline int user_mode(struct pt_regs *regs)
11348 {
11349 #ifdef CONFIG_X86_32
11350 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11351 USER_RPL;
11352 #else
11353 - return user_mode(regs);
11354 + return user_mode_novm(regs);
11355 #endif
11356 }
11357
11358 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11359 #ifdef CONFIG_X86_64
11360 static inline bool user_64bit_mode(struct pt_regs *regs)
11361 {
11362 + unsigned long cs = regs->cs & 0xffff;
11363 #ifndef CONFIG_PARAVIRT
11364 /*
11365 * On non-paravirt systems, this is the only long mode CPL 3
11366 * selector. We do not allow long mode selectors in the LDT.
11367 */
11368 - return regs->cs == __USER_CS;
11369 + return cs == __USER_CS;
11370 #else
11371 /* Headers are too twisted for this to go in paravirt.h. */
11372 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11373 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11374 #endif
11375 }
11376 #endif
11377 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11378 index 92f29706..a79cbbb 100644
11379 --- a/arch/x86/include/asm/reboot.h
11380 +++ b/arch/x86/include/asm/reboot.h
11381 @@ -6,19 +6,19 @@
11382 struct pt_regs;
11383
11384 struct machine_ops {
11385 - void (*restart)(char *cmd);
11386 - void (*halt)(void);
11387 - void (*power_off)(void);
11388 + void (* __noreturn restart)(char *cmd);
11389 + void (* __noreturn halt)(void);
11390 + void (* __noreturn power_off)(void);
11391 void (*shutdown)(void);
11392 void (*crash_shutdown)(struct pt_regs *);
11393 - void (*emergency_restart)(void);
11394 -};
11395 + void (* __noreturn emergency_restart)(void);
11396 +} __no_const;
11397
11398 extern struct machine_ops machine_ops;
11399
11400 void native_machine_crash_shutdown(struct pt_regs *regs);
11401 void native_machine_shutdown(void);
11402 -void machine_real_restart(unsigned int type);
11403 +void machine_real_restart(unsigned int type) __noreturn;
11404 /* These must match dispatch_table in reboot_32.S */
11405 #define MRR_BIOS 0
11406 #define MRR_APM 1
11407 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11408 index 2dbe4a7..ce1db00 100644
11409 --- a/arch/x86/include/asm/rwsem.h
11410 +++ b/arch/x86/include/asm/rwsem.h
11411 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11412 {
11413 asm volatile("# beginning down_read\n\t"
11414 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11415 +
11416 +#ifdef CONFIG_PAX_REFCOUNT
11417 + "jno 0f\n"
11418 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11419 + "int $4\n0:\n"
11420 + _ASM_EXTABLE(0b, 0b)
11421 +#endif
11422 +
11423 /* adds 0x00000001 */
11424 " jns 1f\n"
11425 " call call_rwsem_down_read_failed\n"
11426 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11427 "1:\n\t"
11428 " mov %1,%2\n\t"
11429 " add %3,%2\n\t"
11430 +
11431 +#ifdef CONFIG_PAX_REFCOUNT
11432 + "jno 0f\n"
11433 + "sub %3,%2\n"
11434 + "int $4\n0:\n"
11435 + _ASM_EXTABLE(0b, 0b)
11436 +#endif
11437 +
11438 " jle 2f\n\t"
11439 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11440 " jnz 1b\n\t"
11441 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11442 long tmp;
11443 asm volatile("# beginning down_write\n\t"
11444 LOCK_PREFIX " xadd %1,(%2)\n\t"
11445 +
11446 +#ifdef CONFIG_PAX_REFCOUNT
11447 + "jno 0f\n"
11448 + "mov %1,(%2)\n"
11449 + "int $4\n0:\n"
11450 + _ASM_EXTABLE(0b, 0b)
11451 +#endif
11452 +
11453 /* adds 0xffff0001, returns the old value */
11454 " test %1,%1\n\t"
11455 /* was the count 0 before? */
11456 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11457 long tmp;
11458 asm volatile("# beginning __up_read\n\t"
11459 LOCK_PREFIX " xadd %1,(%2)\n\t"
11460 +
11461 +#ifdef CONFIG_PAX_REFCOUNT
11462 + "jno 0f\n"
11463 + "mov %1,(%2)\n"
11464 + "int $4\n0:\n"
11465 + _ASM_EXTABLE(0b, 0b)
11466 +#endif
11467 +
11468 /* subtracts 1, returns the old value */
11469 " jns 1f\n\t"
11470 " call call_rwsem_wake\n" /* expects old value in %edx */
11471 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11472 long tmp;
11473 asm volatile("# beginning __up_write\n\t"
11474 LOCK_PREFIX " xadd %1,(%2)\n\t"
11475 +
11476 +#ifdef CONFIG_PAX_REFCOUNT
11477 + "jno 0f\n"
11478 + "mov %1,(%2)\n"
11479 + "int $4\n0:\n"
11480 + _ASM_EXTABLE(0b, 0b)
11481 +#endif
11482 +
11483 /* subtracts 0xffff0001, returns the old value */
11484 " jns 1f\n\t"
11485 " call call_rwsem_wake\n" /* expects old value in %edx */
11486 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11487 {
11488 asm volatile("# beginning __downgrade_write\n\t"
11489 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11490 +
11491 +#ifdef CONFIG_PAX_REFCOUNT
11492 + "jno 0f\n"
11493 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11494 + "int $4\n0:\n"
11495 + _ASM_EXTABLE(0b, 0b)
11496 +#endif
11497 +
11498 /*
11499 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11500 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11501 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11502 */
11503 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11504 {
11505 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11506 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11507 +
11508 +#ifdef CONFIG_PAX_REFCOUNT
11509 + "jno 0f\n"
11510 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11511 + "int $4\n0:\n"
11512 + _ASM_EXTABLE(0b, 0b)
11513 +#endif
11514 +
11515 : "+m" (sem->count)
11516 : "er" (delta));
11517 }
11518 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11519 */
11520 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
11521 {
11522 - return delta + xadd(&sem->count, delta);
11523 + return delta + xadd_check_overflow(&sem->count, delta);
11524 }
11525
11526 #endif /* __KERNEL__ */
11527 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11528 index 5e64171..f58957e 100644
11529 --- a/arch/x86/include/asm/segment.h
11530 +++ b/arch/x86/include/asm/segment.h
11531 @@ -64,10 +64,15 @@
11532 * 26 - ESPFIX small SS
11533 * 27 - per-cpu [ offset to per-cpu data area ]
11534 * 28 - stack_canary-20 [ for stack protector ]
11535 - * 29 - unused
11536 - * 30 - unused
11537 + * 29 - PCI BIOS CS
11538 + * 30 - PCI BIOS DS
11539 * 31 - TSS for double fault handler
11540 */
11541 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11542 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11543 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11544 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11545 +
11546 #define GDT_ENTRY_TLS_MIN 6
11547 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11548
11549 @@ -79,6 +84,8 @@
11550
11551 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
11552
11553 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11554 +
11555 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
11556
11557 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
11558 @@ -104,6 +111,12 @@
11559 #define __KERNEL_STACK_CANARY 0
11560 #endif
11561
11562 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
11563 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11564 +
11565 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
11566 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11567 +
11568 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11569
11570 /*
11571 @@ -141,7 +154,7 @@
11572 */
11573
11574 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11575 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11576 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11577
11578
11579 #else
11580 @@ -165,6 +178,8 @@
11581 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
11582 #define __USER32_DS __USER_DS
11583
11584 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11585 +
11586 #define GDT_ENTRY_TSS 8 /* needs two entries */
11587 #define GDT_ENTRY_LDT 10 /* needs two entries */
11588 #define GDT_ENTRY_TLS_MIN 12
11589 @@ -185,6 +200,7 @@
11590 #endif
11591
11592 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
11593 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
11594 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
11595 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
11596 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
11597 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11598 index 73b11bc..d4a3b63 100644
11599 --- a/arch/x86/include/asm/smp.h
11600 +++ b/arch/x86/include/asm/smp.h
11601 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11602 /* cpus sharing the last level cache: */
11603 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
11604 DECLARE_PER_CPU(u16, cpu_llc_id);
11605 -DECLARE_PER_CPU(int, cpu_number);
11606 +DECLARE_PER_CPU(unsigned int, cpu_number);
11607
11608 static inline struct cpumask *cpu_sibling_mask(int cpu)
11609 {
11610 @@ -77,7 +77,7 @@ struct smp_ops {
11611
11612 void (*send_call_func_ipi)(const struct cpumask *mask);
11613 void (*send_call_func_single_ipi)(int cpu);
11614 -};
11615 +} __no_const;
11616
11617 /* Globals due to paravirt */
11618 extern void set_cpu_sibling_map(int cpu);
11619 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11620 extern int safe_smp_processor_id(void);
11621
11622 #elif defined(CONFIG_X86_64_SMP)
11623 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11624 -
11625 -#define stack_smp_processor_id() \
11626 -({ \
11627 - struct thread_info *ti; \
11628 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11629 - ti->cpu; \
11630 -})
11631 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11632 +#define stack_smp_processor_id() raw_smp_processor_id()
11633 #define safe_smp_processor_id() smp_processor_id()
11634
11635 #endif
11636 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11637 index 972c260..43ab1fd 100644
11638 --- a/arch/x86/include/asm/spinlock.h
11639 +++ b/arch/x86/include/asm/spinlock.h
11640 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
11641 static inline void arch_read_lock(arch_rwlock_t *rw)
11642 {
11643 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
11644 +
11645 +#ifdef CONFIG_PAX_REFCOUNT
11646 + "jno 0f\n"
11647 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
11648 + "int $4\n0:\n"
11649 + _ASM_EXTABLE(0b, 0b)
11650 +#endif
11651 +
11652 "jns 1f\n"
11653 "call __read_lock_failed\n\t"
11654 "1:\n"
11655 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
11656 static inline void arch_write_lock(arch_rwlock_t *rw)
11657 {
11658 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
11659 +
11660 +#ifdef CONFIG_PAX_REFCOUNT
11661 + "jno 0f\n"
11662 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
11663 + "int $4\n0:\n"
11664 + _ASM_EXTABLE(0b, 0b)
11665 +#endif
11666 +
11667 "jz 1f\n"
11668 "call __write_lock_failed\n\t"
11669 "1:\n"
11670 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
11671
11672 static inline void arch_read_unlock(arch_rwlock_t *rw)
11673 {
11674 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
11675 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
11676 +
11677 +#ifdef CONFIG_PAX_REFCOUNT
11678 + "jno 0f\n"
11679 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
11680 + "int $4\n0:\n"
11681 + _ASM_EXTABLE(0b, 0b)
11682 +#endif
11683 +
11684 :"+m" (rw->lock) : : "memory");
11685 }
11686
11687 static inline void arch_write_unlock(arch_rwlock_t *rw)
11688 {
11689 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
11690 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
11691 +
11692 +#ifdef CONFIG_PAX_REFCOUNT
11693 + "jno 0f\n"
11694 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
11695 + "int $4\n0:\n"
11696 + _ASM_EXTABLE(0b, 0b)
11697 +#endif
11698 +
11699 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
11700 }
11701
11702 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11703 index 1575177..cb23f52 100644
11704 --- a/arch/x86/include/asm/stackprotector.h
11705 +++ b/arch/x86/include/asm/stackprotector.h
11706 @@ -48,7 +48,7 @@
11707 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11708 */
11709 #define GDT_STACK_CANARY_INIT \
11710 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11711 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11712
11713 /*
11714 * Initialize the stackprotector canary value.
11715 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11716
11717 static inline void load_stack_canary_segment(void)
11718 {
11719 -#ifdef CONFIG_X86_32
11720 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11721 asm volatile ("mov %0, %%gs" : : "r" (0));
11722 #endif
11723 }
11724 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
11725 index 70bbe39..4ae2bd4 100644
11726 --- a/arch/x86/include/asm/stacktrace.h
11727 +++ b/arch/x86/include/asm/stacktrace.h
11728 @@ -11,28 +11,20 @@
11729
11730 extern int kstack_depth_to_print;
11731
11732 -struct thread_info;
11733 +struct task_struct;
11734 struct stacktrace_ops;
11735
11736 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
11737 - unsigned long *stack,
11738 - unsigned long bp,
11739 - const struct stacktrace_ops *ops,
11740 - void *data,
11741 - unsigned long *end,
11742 - int *graph);
11743 +typedef unsigned long walk_stack_t(struct task_struct *task,
11744 + void *stack_start,
11745 + unsigned long *stack,
11746 + unsigned long bp,
11747 + const struct stacktrace_ops *ops,
11748 + void *data,
11749 + unsigned long *end,
11750 + int *graph);
11751
11752 -extern unsigned long
11753 -print_context_stack(struct thread_info *tinfo,
11754 - unsigned long *stack, unsigned long bp,
11755 - const struct stacktrace_ops *ops, void *data,
11756 - unsigned long *end, int *graph);
11757 -
11758 -extern unsigned long
11759 -print_context_stack_bp(struct thread_info *tinfo,
11760 - unsigned long *stack, unsigned long bp,
11761 - const struct stacktrace_ops *ops, void *data,
11762 - unsigned long *end, int *graph);
11763 +extern walk_stack_t print_context_stack;
11764 +extern walk_stack_t print_context_stack_bp;
11765
11766 /* Generic stack tracer with callbacks */
11767
11768 @@ -40,7 +32,7 @@ struct stacktrace_ops {
11769 void (*address)(void *data, unsigned long address, int reliable);
11770 /* On negative return stop dumping */
11771 int (*stack)(void *data, char *name);
11772 - walk_stack_t walk_stack;
11773 + walk_stack_t *walk_stack;
11774 };
11775
11776 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
11777 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
11778 index cb23852..2dde194 100644
11779 --- a/arch/x86/include/asm/sys_ia32.h
11780 +++ b/arch/x86/include/asm/sys_ia32.h
11781 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
11782 compat_sigset_t __user *, unsigned int);
11783 asmlinkage long sys32_alarm(unsigned int);
11784
11785 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
11786 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
11787 asmlinkage long sys32_sysfs(int, u32, u32);
11788
11789 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
11790 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11791 index 2d2f01c..f985723 100644
11792 --- a/arch/x86/include/asm/system.h
11793 +++ b/arch/x86/include/asm/system.h
11794 @@ -129,7 +129,7 @@ do { \
11795 "call __switch_to\n\t" \
11796 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11797 __switch_canary \
11798 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
11799 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11800 "movq %%rax,%%rdi\n\t" \
11801 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11802 "jnz ret_from_fork\n\t" \
11803 @@ -140,7 +140,7 @@ do { \
11804 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11805 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11806 [_tif_fork] "i" (_TIF_FORK), \
11807 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
11808 + [thread_info] "m" (current_tinfo), \
11809 [current_task] "m" (current_task) \
11810 __switch_canary_iparam \
11811 : "memory", "cc" __EXTRA_CLOBBER)
11812 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11813 {
11814 unsigned long __limit;
11815 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11816 - return __limit + 1;
11817 + return __limit;
11818 }
11819
11820 static inline void native_clts(void)
11821 @@ -397,13 +397,13 @@ void enable_hlt(void);
11822
11823 void cpu_idle_wait(void);
11824
11825 -extern unsigned long arch_align_stack(unsigned long sp);
11826 +#define arch_align_stack(x) ((x) & ~0xfUL)
11827 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11828
11829 void default_idle(void);
11830 bool set_pm_idle_to_default(void);
11831
11832 -void stop_this_cpu(void *dummy);
11833 +void stop_this_cpu(void *dummy) __noreturn;
11834
11835 /*
11836 * Force strict CPU ordering.
11837 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11838 index d7ef849..6af292e 100644
11839 --- a/arch/x86/include/asm/thread_info.h
11840 +++ b/arch/x86/include/asm/thread_info.h
11841 @@ -10,6 +10,7 @@
11842 #include <linux/compiler.h>
11843 #include <asm/page.h>
11844 #include <asm/types.h>
11845 +#include <asm/percpu.h>
11846
11847 /*
11848 * low level task data that entry.S needs immediate access to
11849 @@ -24,7 +25,6 @@ struct exec_domain;
11850 #include <linux/atomic.h>
11851
11852 struct thread_info {
11853 - struct task_struct *task; /* main task structure */
11854 struct exec_domain *exec_domain; /* execution domain */
11855 __u32 flags; /* low level flags */
11856 __u32 status; /* thread synchronous flags */
11857 @@ -34,18 +34,12 @@ struct thread_info {
11858 mm_segment_t addr_limit;
11859 struct restart_block restart_block;
11860 void __user *sysenter_return;
11861 -#ifdef CONFIG_X86_32
11862 - unsigned long previous_esp; /* ESP of the previous stack in
11863 - case of nested (IRQ) stacks
11864 - */
11865 - __u8 supervisor_stack[0];
11866 -#endif
11867 + unsigned long lowest_stack;
11868 int uaccess_err;
11869 };
11870
11871 -#define INIT_THREAD_INFO(tsk) \
11872 +#define INIT_THREAD_INFO \
11873 { \
11874 - .task = &tsk, \
11875 .exec_domain = &default_exec_domain, \
11876 .flags = 0, \
11877 .cpu = 0, \
11878 @@ -56,7 +50,7 @@ struct thread_info {
11879 }, \
11880 }
11881
11882 -#define init_thread_info (init_thread_union.thread_info)
11883 +#define init_thread_info (init_thread_union.stack)
11884 #define init_stack (init_thread_union.stack)
11885
11886 #else /* !__ASSEMBLY__ */
11887 @@ -170,45 +164,40 @@ struct thread_info {
11888 ret; \
11889 })
11890
11891 -#ifdef CONFIG_X86_32
11892 -
11893 -#define STACK_WARN (THREAD_SIZE/8)
11894 -/*
11895 - * macros/functions for gaining access to the thread information structure
11896 - *
11897 - * preempt_count needs to be 1 initially, until the scheduler is functional.
11898 - */
11899 -#ifndef __ASSEMBLY__
11900 -
11901 -
11902 -/* how to get the current stack pointer from C */
11903 -register unsigned long current_stack_pointer asm("esp") __used;
11904 -
11905 -/* how to get the thread information struct from C */
11906 -static inline struct thread_info *current_thread_info(void)
11907 -{
11908 - return (struct thread_info *)
11909 - (current_stack_pointer & ~(THREAD_SIZE - 1));
11910 -}
11911 -
11912 -#else /* !__ASSEMBLY__ */
11913 -
11914 +#ifdef __ASSEMBLY__
11915 /* how to get the thread information struct from ASM */
11916 #define GET_THREAD_INFO(reg) \
11917 - movl $-THREAD_SIZE, reg; \
11918 - andl %esp, reg
11919 + mov PER_CPU_VAR(current_tinfo), reg
11920
11921 /* use this one if reg already contains %esp */
11922 -#define GET_THREAD_INFO_WITH_ESP(reg) \
11923 - andl $-THREAD_SIZE, reg
11924 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
11925 +#else
11926 +/* how to get the thread information struct from C */
11927 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
11928 +
11929 +static __always_inline struct thread_info *current_thread_info(void)
11930 +{
11931 + return percpu_read_stable(current_tinfo);
11932 +}
11933 +#endif
11934 +
11935 +#ifdef CONFIG_X86_32
11936 +
11937 +#define STACK_WARN (THREAD_SIZE/8)
11938 +/*
11939 + * macros/functions for gaining access to the thread information structure
11940 + *
11941 + * preempt_count needs to be 1 initially, until the scheduler is functional.
11942 + */
11943 +#ifndef __ASSEMBLY__
11944 +
11945 +/* how to get the current stack pointer from C */
11946 +register unsigned long current_stack_pointer asm("esp") __used;
11947
11948 #endif
11949
11950 #else /* X86_32 */
11951
11952 -#include <asm/percpu.h>
11953 -#define KERNEL_STACK_OFFSET (5*8)
11954 -
11955 /*
11956 * macros/functions for gaining access to the thread information structure
11957 * preempt_count needs to be 1 initially, until the scheduler is functional.
11958 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
11959 #ifndef __ASSEMBLY__
11960 DECLARE_PER_CPU(unsigned long, kernel_stack);
11961
11962 -static inline struct thread_info *current_thread_info(void)
11963 -{
11964 - struct thread_info *ti;
11965 - ti = (void *)(percpu_read_stable(kernel_stack) +
11966 - KERNEL_STACK_OFFSET - THREAD_SIZE);
11967 - return ti;
11968 -}
11969 -
11970 -#else /* !__ASSEMBLY__ */
11971 -
11972 -/* how to get the thread information struct from ASM */
11973 -#define GET_THREAD_INFO(reg) \
11974 - movq PER_CPU_VAR(kernel_stack),reg ; \
11975 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
11976 -
11977 +/* how to get the current stack pointer from C */
11978 +register unsigned long current_stack_pointer asm("rsp") __used;
11979 #endif
11980
11981 #endif /* !X86_32 */
11982 @@ -264,5 +240,16 @@ extern void arch_task_cache_init(void);
11983 extern void free_thread_info(struct thread_info *ti);
11984 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
11985 #define arch_task_cache_init arch_task_cache_init
11986 +
11987 +#define __HAVE_THREAD_FUNCTIONS
11988 +#define task_thread_info(task) (&(task)->tinfo)
11989 +#define task_stack_page(task) ((task)->stack)
11990 +#define setup_thread_stack(p, org) do {} while (0)
11991 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
11992 +
11993 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
11994 +extern struct task_struct *alloc_task_struct_node(int node);
11995 +extern void free_task_struct(struct task_struct *);
11996 +
11997 #endif
11998 #endif /* _ASM_X86_THREAD_INFO_H */
11999 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12000 index 36361bf..324f262 100644
12001 --- a/arch/x86/include/asm/uaccess.h
12002 +++ b/arch/x86/include/asm/uaccess.h
12003 @@ -7,12 +7,15 @@
12004 #include <linux/compiler.h>
12005 #include <linux/thread_info.h>
12006 #include <linux/string.h>
12007 +#include <linux/sched.h>
12008 #include <asm/asm.h>
12009 #include <asm/page.h>
12010
12011 #define VERIFY_READ 0
12012 #define VERIFY_WRITE 1
12013
12014 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12015 +
12016 /*
12017 * The fs value determines whether argument validity checking should be
12018 * performed or not. If get_fs() == USER_DS, checking is performed, with
12019 @@ -28,7 +31,12 @@
12020
12021 #define get_ds() (KERNEL_DS)
12022 #define get_fs() (current_thread_info()->addr_limit)
12023 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12024 +void __set_fs(mm_segment_t x);
12025 +void set_fs(mm_segment_t x);
12026 +#else
12027 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12028 +#endif
12029
12030 #define segment_eq(a, b) ((a).seg == (b).seg)
12031
12032 @@ -76,7 +84,33 @@
12033 * checks that the pointer is in the user space range - after calling
12034 * this function, memory access functions may still return -EFAULT.
12035 */
12036 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12037 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12038 +#define access_ok(type, addr, size) \
12039 +({ \
12040 + long __size = size; \
12041 + unsigned long __addr = (unsigned long)addr; \
12042 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12043 + unsigned long __end_ao = __addr + __size - 1; \
12044 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12045 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12046 + while(__addr_ao <= __end_ao) { \
12047 + char __c_ao; \
12048 + __addr_ao += PAGE_SIZE; \
12049 + if (__size > PAGE_SIZE) \
12050 + cond_resched(); \
12051 + if (__get_user(__c_ao, (char __user *)__addr)) \
12052 + break; \
12053 + if (type != VERIFY_WRITE) { \
12054 + __addr = __addr_ao; \
12055 + continue; \
12056 + } \
12057 + if (__put_user(__c_ao, (char __user *)__addr)) \
12058 + break; \
12059 + __addr = __addr_ao; \
12060 + } \
12061 + } \
12062 + __ret_ao; \
12063 +})
12064
12065 /*
12066 * The exception table consists of pairs of addresses: the first is the
12067 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12068 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12069 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12070
12071 -
12072 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12073 +#define __copyuser_seg "gs;"
12074 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12075 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12076 +#else
12077 +#define __copyuser_seg
12078 +#define __COPYUSER_SET_ES
12079 +#define __COPYUSER_RESTORE_ES
12080 +#endif
12081
12082 #ifdef CONFIG_X86_32
12083 #define __put_user_asm_u64(x, addr, err, errret) \
12084 - asm volatile("1: movl %%eax,0(%2)\n" \
12085 - "2: movl %%edx,4(%2)\n" \
12086 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12087 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12088 "3:\n" \
12089 ".section .fixup,\"ax\"\n" \
12090 "4: movl %3,%0\n" \
12091 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12092 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12093
12094 #define __put_user_asm_ex_u64(x, addr) \
12095 - asm volatile("1: movl %%eax,0(%1)\n" \
12096 - "2: movl %%edx,4(%1)\n" \
12097 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12098 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12099 "3:\n" \
12100 _ASM_EXTABLE(1b, 2b - 1b) \
12101 _ASM_EXTABLE(2b, 3b - 2b) \
12102 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12103 __typeof__(*(ptr)) __pu_val; \
12104 __chk_user_ptr(ptr); \
12105 might_fault(); \
12106 - __pu_val = x; \
12107 + __pu_val = (x); \
12108 switch (sizeof(*(ptr))) { \
12109 case 1: \
12110 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12111 @@ -373,7 +415,7 @@ do { \
12112 } while (0)
12113
12114 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12115 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12116 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12117 "2:\n" \
12118 ".section .fixup,\"ax\"\n" \
12119 "3: mov %3,%0\n" \
12120 @@ -381,7 +423,7 @@ do { \
12121 " jmp 2b\n" \
12122 ".previous\n" \
12123 _ASM_EXTABLE(1b, 3b) \
12124 - : "=r" (err), ltype(x) \
12125 + : "=r" (err), ltype (x) \
12126 : "m" (__m(addr)), "i" (errret), "0" (err))
12127
12128 #define __get_user_size_ex(x, ptr, size) \
12129 @@ -406,7 +448,7 @@ do { \
12130 } while (0)
12131
12132 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12133 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12134 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12135 "2:\n" \
12136 _ASM_EXTABLE(1b, 2b - 1b) \
12137 : ltype(x) : "m" (__m(addr)))
12138 @@ -423,13 +465,24 @@ do { \
12139 int __gu_err; \
12140 unsigned long __gu_val; \
12141 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12142 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12143 + (x) = (__typeof__(*(ptr)))__gu_val; \
12144 __gu_err; \
12145 })
12146
12147 /* FIXME: this hack is definitely wrong -AK */
12148 struct __large_struct { unsigned long buf[100]; };
12149 -#define __m(x) (*(struct __large_struct __user *)(x))
12150 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12151 +#define ____m(x) \
12152 +({ \
12153 + unsigned long ____x = (unsigned long)(x); \
12154 + if (____x < PAX_USER_SHADOW_BASE) \
12155 + ____x += PAX_USER_SHADOW_BASE; \
12156 + (void __user *)____x; \
12157 +})
12158 +#else
12159 +#define ____m(x) (x)
12160 +#endif
12161 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12162
12163 /*
12164 * Tell gcc we read from memory instead of writing: this is because
12165 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12166 * aliasing issues.
12167 */
12168 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12169 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12170 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12171 "2:\n" \
12172 ".section .fixup,\"ax\"\n" \
12173 "3: mov %3,%0\n" \
12174 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12175 ".previous\n" \
12176 _ASM_EXTABLE(1b, 3b) \
12177 : "=r"(err) \
12178 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12179 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12180
12181 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12182 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12183 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12184 "2:\n" \
12185 _ASM_EXTABLE(1b, 2b - 1b) \
12186 : : ltype(x), "m" (__m(addr)))
12187 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12188 * On error, the variable @x is set to zero.
12189 */
12190
12191 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12192 +#define __get_user(x, ptr) get_user((x), (ptr))
12193 +#else
12194 #define __get_user(x, ptr) \
12195 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12196 +#endif
12197
12198 /**
12199 * __put_user: - Write a simple value into user space, with less checking.
12200 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12201 * Returns zero on success, or -EFAULT on error.
12202 */
12203
12204 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12205 +#define __put_user(x, ptr) put_user((x), (ptr))
12206 +#else
12207 #define __put_user(x, ptr) \
12208 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12209 +#endif
12210
12211 #define __get_user_unaligned __get_user
12212 #define __put_user_unaligned __put_user
12213 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12214 #define get_user_ex(x, ptr) do { \
12215 unsigned long __gue_val; \
12216 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12217 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12218 + (x) = (__typeof__(*(ptr)))__gue_val; \
12219 } while (0)
12220
12221 #ifdef CONFIG_X86_WP_WORKS_OK
12222 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12223 index 566e803..b9521e9 100644
12224 --- a/arch/x86/include/asm/uaccess_32.h
12225 +++ b/arch/x86/include/asm/uaccess_32.h
12226 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12227 static __always_inline unsigned long __must_check
12228 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12229 {
12230 + if ((long)n < 0)
12231 + return n;
12232 +
12233 if (__builtin_constant_p(n)) {
12234 unsigned long ret;
12235
12236 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12237 return ret;
12238 }
12239 }
12240 + if (!__builtin_constant_p(n))
12241 + check_object_size(from, n, true);
12242 return __copy_to_user_ll(to, from, n);
12243 }
12244
12245 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12246 __copy_to_user(void __user *to, const void *from, unsigned long n)
12247 {
12248 might_fault();
12249 +
12250 return __copy_to_user_inatomic(to, from, n);
12251 }
12252
12253 static __always_inline unsigned long
12254 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12255 {
12256 + if ((long)n < 0)
12257 + return n;
12258 +
12259 /* Avoid zeroing the tail if the copy fails..
12260 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12261 * but as the zeroing behaviour is only significant when n is not
12262 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12263 __copy_from_user(void *to, const void __user *from, unsigned long n)
12264 {
12265 might_fault();
12266 +
12267 + if ((long)n < 0)
12268 + return n;
12269 +
12270 if (__builtin_constant_p(n)) {
12271 unsigned long ret;
12272
12273 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12274 return ret;
12275 }
12276 }
12277 + if (!__builtin_constant_p(n))
12278 + check_object_size(to, n, false);
12279 return __copy_from_user_ll(to, from, n);
12280 }
12281
12282 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12283 const void __user *from, unsigned long n)
12284 {
12285 might_fault();
12286 +
12287 + if ((long)n < 0)
12288 + return n;
12289 +
12290 if (__builtin_constant_p(n)) {
12291 unsigned long ret;
12292
12293 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12294 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12295 unsigned long n)
12296 {
12297 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12298 + if ((long)n < 0)
12299 + return n;
12300 +
12301 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12302 }
12303
12304 -unsigned long __must_check copy_to_user(void __user *to,
12305 - const void *from, unsigned long n);
12306 -unsigned long __must_check _copy_from_user(void *to,
12307 - const void __user *from,
12308 - unsigned long n);
12309 -
12310 +extern void copy_to_user_overflow(void)
12311 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12312 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12313 +#else
12314 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12315 +#endif
12316 +;
12317
12318 extern void copy_from_user_overflow(void)
12319 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12320 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12321 #endif
12322 ;
12323
12324 -static inline unsigned long __must_check copy_from_user(void *to,
12325 - const void __user *from,
12326 - unsigned long n)
12327 +/**
12328 + * copy_to_user: - Copy a block of data into user space.
12329 + * @to: Destination address, in user space.
12330 + * @from: Source address, in kernel space.
12331 + * @n: Number of bytes to copy.
12332 + *
12333 + * Context: User context only. This function may sleep.
12334 + *
12335 + * Copy data from kernel space to user space.
12336 + *
12337 + * Returns number of bytes that could not be copied.
12338 + * On success, this will be zero.
12339 + */
12340 +static inline unsigned long __must_check
12341 +copy_to_user(void __user *to, const void *from, unsigned long n)
12342 +{
12343 + int sz = __compiletime_object_size(from);
12344 +
12345 + if (unlikely(sz != -1 && sz < n))
12346 + copy_to_user_overflow();
12347 + else if (access_ok(VERIFY_WRITE, to, n))
12348 + n = __copy_to_user(to, from, n);
12349 + return n;
12350 +}
12351 +
12352 +/**
12353 + * copy_from_user: - Copy a block of data from user space.
12354 + * @to: Destination address, in kernel space.
12355 + * @from: Source address, in user space.
12356 + * @n: Number of bytes to copy.
12357 + *
12358 + * Context: User context only. This function may sleep.
12359 + *
12360 + * Copy data from user space to kernel space.
12361 + *
12362 + * Returns number of bytes that could not be copied.
12363 + * On success, this will be zero.
12364 + *
12365 + * If some data could not be copied, this function will pad the copied
12366 + * data to the requested size using zero bytes.
12367 + */
12368 +static inline unsigned long __must_check
12369 +copy_from_user(void *to, const void __user *from, unsigned long n)
12370 {
12371 int sz = __compiletime_object_size(to);
12372
12373 - if (likely(sz == -1 || sz >= n))
12374 - n = _copy_from_user(to, from, n);
12375 - else
12376 + if (unlikely(sz != -1 && sz < n))
12377 copy_from_user_overflow();
12378 -
12379 + else if (access_ok(VERIFY_READ, from, n))
12380 + n = __copy_from_user(to, from, n);
12381 + else if ((long)n > 0) {
12382 + if (!__builtin_constant_p(n))
12383 + check_object_size(to, n, false);
12384 + memset(to, 0, n);
12385 + }
12386 return n;
12387 }
12388
12389 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12390 index 1c66d30..e66922c 100644
12391 --- a/arch/x86/include/asm/uaccess_64.h
12392 +++ b/arch/x86/include/asm/uaccess_64.h
12393 @@ -10,6 +10,9 @@
12394 #include <asm/alternative.h>
12395 #include <asm/cpufeature.h>
12396 #include <asm/page.h>
12397 +#include <asm/pgtable.h>
12398 +
12399 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12400
12401 /*
12402 * Copy To/From Userspace
12403 @@ -17,12 +20,12 @@
12404
12405 /* Handles exceptions in both to and from, but doesn't do access_ok */
12406 __must_check unsigned long
12407 -copy_user_generic_string(void *to, const void *from, unsigned len);
12408 +copy_user_generic_string(void *to, const void *from, unsigned long len);
12409 __must_check unsigned long
12410 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
12411 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
12412
12413 static __always_inline __must_check unsigned long
12414 -copy_user_generic(void *to, const void *from, unsigned len)
12415 +copy_user_generic(void *to, const void *from, unsigned long len)
12416 {
12417 unsigned ret;
12418
12419 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
12420 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
12421 "=d" (len)),
12422 "1" (to), "2" (from), "3" (len)
12423 - : "memory", "rcx", "r8", "r9", "r10", "r11");
12424 + : "memory", "rcx", "r8", "r9", "r11");
12425 return ret;
12426 }
12427
12428 +static __always_inline __must_check unsigned long
12429 +__copy_to_user(void __user *to, const void *from, unsigned long len);
12430 +static __always_inline __must_check unsigned long
12431 +__copy_from_user(void *to, const void __user *from, unsigned long len);
12432 __must_check unsigned long
12433 -_copy_to_user(void __user *to, const void *from, unsigned len);
12434 -__must_check unsigned long
12435 -_copy_from_user(void *to, const void __user *from, unsigned len);
12436 -__must_check unsigned long
12437 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12438 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12439
12440 static inline unsigned long __must_check copy_from_user(void *to,
12441 const void __user *from,
12442 unsigned long n)
12443 {
12444 - int sz = __compiletime_object_size(to);
12445 -
12446 might_fault();
12447 - if (likely(sz == -1 || sz >= n))
12448 - n = _copy_from_user(to, from, n);
12449 -#ifdef CONFIG_DEBUG_VM
12450 - else
12451 - WARN(1, "Buffer overflow detected!\n");
12452 -#endif
12453 +
12454 + if (access_ok(VERIFY_READ, from, n))
12455 + n = __copy_from_user(to, from, n);
12456 + else if (n < INT_MAX) {
12457 + if (!__builtin_constant_p(n))
12458 + check_object_size(to, n, false);
12459 + memset(to, 0, n);
12460 + }
12461 return n;
12462 }
12463
12464 static __always_inline __must_check
12465 -int copy_to_user(void __user *dst, const void *src, unsigned size)
12466 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
12467 {
12468 might_fault();
12469
12470 - return _copy_to_user(dst, src, size);
12471 + if (access_ok(VERIFY_WRITE, dst, size))
12472 + size = __copy_to_user(dst, src, size);
12473 + return size;
12474 }
12475
12476 static __always_inline __must_check
12477 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12478 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12479 {
12480 - int ret = 0;
12481 + int sz = __compiletime_object_size(dst);
12482 + unsigned ret = 0;
12483
12484 might_fault();
12485 - if (!__builtin_constant_p(size))
12486 - return copy_user_generic(dst, (__force void *)src, size);
12487 +
12488 + if (size > INT_MAX)
12489 + return size;
12490 +
12491 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12492 + if (!__access_ok(VERIFY_READ, src, size))
12493 + return size;
12494 +#endif
12495 +
12496 + if (unlikely(sz != -1 && sz < size)) {
12497 +#ifdef CONFIG_DEBUG_VM
12498 + WARN(1, "Buffer overflow detected!\n");
12499 +#endif
12500 + return size;
12501 + }
12502 +
12503 + if (!__builtin_constant_p(size)) {
12504 + check_object_size(dst, size, false);
12505 +
12506 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12507 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12508 + src += PAX_USER_SHADOW_BASE;
12509 +#endif
12510 +
12511 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12512 + }
12513 switch (size) {
12514 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12515 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12516 ret, "b", "b", "=q", 1);
12517 return ret;
12518 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12519 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12520 ret, "w", "w", "=r", 2);
12521 return ret;
12522 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12523 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12524 ret, "l", "k", "=r", 4);
12525 return ret;
12526 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12527 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12528 ret, "q", "", "=r", 8);
12529 return ret;
12530 case 10:
12531 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12532 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12533 ret, "q", "", "=r", 10);
12534 if (unlikely(ret))
12535 return ret;
12536 __get_user_asm(*(u16 *)(8 + (char *)dst),
12537 - (u16 __user *)(8 + (char __user *)src),
12538 + (const u16 __user *)(8 + (const char __user *)src),
12539 ret, "w", "w", "=r", 2);
12540 return ret;
12541 case 16:
12542 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12543 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12544 ret, "q", "", "=r", 16);
12545 if (unlikely(ret))
12546 return ret;
12547 __get_user_asm(*(u64 *)(8 + (char *)dst),
12548 - (u64 __user *)(8 + (char __user *)src),
12549 + (const u64 __user *)(8 + (const char __user *)src),
12550 ret, "q", "", "=r", 8);
12551 return ret;
12552 default:
12553 - return copy_user_generic(dst, (__force void *)src, size);
12554 +
12555 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12556 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12557 + src += PAX_USER_SHADOW_BASE;
12558 +#endif
12559 +
12560 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12561 }
12562 }
12563
12564 static __always_inline __must_check
12565 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12566 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12567 {
12568 - int ret = 0;
12569 + int sz = __compiletime_object_size(src);
12570 + unsigned ret = 0;
12571
12572 might_fault();
12573 - if (!__builtin_constant_p(size))
12574 - return copy_user_generic((__force void *)dst, src, size);
12575 +
12576 + if (size > INT_MAX)
12577 + return size;
12578 +
12579 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12580 + if (!__access_ok(VERIFY_WRITE, dst, size))
12581 + return size;
12582 +#endif
12583 +
12584 + if (unlikely(sz != -1 && sz < size)) {
12585 +#ifdef CONFIG_DEBUG_VM
12586 + WARN(1, "Buffer overflow detected!\n");
12587 +#endif
12588 + return size;
12589 + }
12590 +
12591 + if (!__builtin_constant_p(size)) {
12592 + check_object_size(src, size, true);
12593 +
12594 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12595 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12596 + dst += PAX_USER_SHADOW_BASE;
12597 +#endif
12598 +
12599 + return copy_user_generic((__force_kernel void *)dst, src, size);
12600 + }
12601 switch (size) {
12602 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12603 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12604 ret, "b", "b", "iq", 1);
12605 return ret;
12606 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12607 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12608 ret, "w", "w", "ir", 2);
12609 return ret;
12610 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12611 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12612 ret, "l", "k", "ir", 4);
12613 return ret;
12614 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12615 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12616 ret, "q", "", "er", 8);
12617 return ret;
12618 case 10:
12619 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12620 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12621 ret, "q", "", "er", 10);
12622 if (unlikely(ret))
12623 return ret;
12624 asm("":::"memory");
12625 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12626 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12627 ret, "w", "w", "ir", 2);
12628 return ret;
12629 case 16:
12630 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12631 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12632 ret, "q", "", "er", 16);
12633 if (unlikely(ret))
12634 return ret;
12635 asm("":::"memory");
12636 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12637 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12638 ret, "q", "", "er", 8);
12639 return ret;
12640 default:
12641 - return copy_user_generic((__force void *)dst, src, size);
12642 +
12643 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12644 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12645 + dst += PAX_USER_SHADOW_BASE;
12646 +#endif
12647 +
12648 + return copy_user_generic((__force_kernel void *)dst, src, size);
12649 }
12650 }
12651
12652 static __always_inline __must_check
12653 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12654 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12655 {
12656 - int ret = 0;
12657 + unsigned ret = 0;
12658
12659 might_fault();
12660 - if (!__builtin_constant_p(size))
12661 - return copy_user_generic((__force void *)dst,
12662 - (__force void *)src, size);
12663 +
12664 + if (size > INT_MAX)
12665 + return size;
12666 +
12667 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12668 + if (!__access_ok(VERIFY_READ, src, size))
12669 + return size;
12670 + if (!__access_ok(VERIFY_WRITE, dst, size))
12671 + return size;
12672 +#endif
12673 +
12674 + if (!__builtin_constant_p(size)) {
12675 +
12676 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12677 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12678 + src += PAX_USER_SHADOW_BASE;
12679 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12680 + dst += PAX_USER_SHADOW_BASE;
12681 +#endif
12682 +
12683 + return copy_user_generic((__force_kernel void *)dst,
12684 + (__force_kernel const void *)src, size);
12685 + }
12686 switch (size) {
12687 case 1: {
12688 u8 tmp;
12689 - __get_user_asm(tmp, (u8 __user *)src,
12690 + __get_user_asm(tmp, (const u8 __user *)src,
12691 ret, "b", "b", "=q", 1);
12692 if (likely(!ret))
12693 __put_user_asm(tmp, (u8 __user *)dst,
12694 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12695 }
12696 case 2: {
12697 u16 tmp;
12698 - __get_user_asm(tmp, (u16 __user *)src,
12699 + __get_user_asm(tmp, (const u16 __user *)src,
12700 ret, "w", "w", "=r", 2);
12701 if (likely(!ret))
12702 __put_user_asm(tmp, (u16 __user *)dst,
12703 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12704
12705 case 4: {
12706 u32 tmp;
12707 - __get_user_asm(tmp, (u32 __user *)src,
12708 + __get_user_asm(tmp, (const u32 __user *)src,
12709 ret, "l", "k", "=r", 4);
12710 if (likely(!ret))
12711 __put_user_asm(tmp, (u32 __user *)dst,
12712 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12713 }
12714 case 8: {
12715 u64 tmp;
12716 - __get_user_asm(tmp, (u64 __user *)src,
12717 + __get_user_asm(tmp, (const u64 __user *)src,
12718 ret, "q", "", "=r", 8);
12719 if (likely(!ret))
12720 __put_user_asm(tmp, (u64 __user *)dst,
12721 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12722 return ret;
12723 }
12724 default:
12725 - return copy_user_generic((__force void *)dst,
12726 - (__force void *)src, size);
12727 +
12728 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12729 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12730 + src += PAX_USER_SHADOW_BASE;
12731 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12732 + dst += PAX_USER_SHADOW_BASE;
12733 +#endif
12734 +
12735 + return copy_user_generic((__force_kernel void *)dst,
12736 + (__force_kernel const void *)src, size);
12737 }
12738 }
12739
12740 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12741 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12742
12743 static __must_check __always_inline int
12744 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12745 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12746 {
12747 - return copy_user_generic(dst, (__force const void *)src, size);
12748 + if (size > INT_MAX)
12749 + return size;
12750 +
12751 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12752 + if (!__access_ok(VERIFY_READ, src, size))
12753 + return size;
12754 +
12755 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12756 + src += PAX_USER_SHADOW_BASE;
12757 +#endif
12758 +
12759 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12760 }
12761
12762 -static __must_check __always_inline int
12763 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12764 +static __must_check __always_inline unsigned long
12765 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12766 {
12767 - return copy_user_generic((__force void *)dst, src, size);
12768 + if (size > INT_MAX)
12769 + return size;
12770 +
12771 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12772 + if (!__access_ok(VERIFY_WRITE, dst, size))
12773 + return size;
12774 +
12775 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12776 + dst += PAX_USER_SHADOW_BASE;
12777 +#endif
12778 +
12779 + return copy_user_generic((__force_kernel void *)dst, src, size);
12780 }
12781
12782 -extern long __copy_user_nocache(void *dst, const void __user *src,
12783 - unsigned size, int zerorest);
12784 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12785 + unsigned long size, int zerorest);
12786
12787 -static inline int
12788 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12789 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12790 {
12791 might_sleep();
12792 +
12793 + if (size > INT_MAX)
12794 + return size;
12795 +
12796 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12797 + if (!__access_ok(VERIFY_READ, src, size))
12798 + return size;
12799 +#endif
12800 +
12801 return __copy_user_nocache(dst, src, size, 1);
12802 }
12803
12804 -static inline int
12805 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12806 - unsigned size)
12807 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12808 + unsigned long size)
12809 {
12810 + if (size > INT_MAX)
12811 + return size;
12812 +
12813 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12814 + if (!__access_ok(VERIFY_READ, src, size))
12815 + return size;
12816 +#endif
12817 +
12818 return __copy_user_nocache(dst, src, size, 0);
12819 }
12820
12821 -unsigned long
12822 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12823 +extern unsigned long
12824 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12825
12826 #endif /* _ASM_X86_UACCESS_64_H */
12827 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12828 index bb05228..d763d5b 100644
12829 --- a/arch/x86/include/asm/vdso.h
12830 +++ b/arch/x86/include/asm/vdso.h
12831 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
12832 #define VDSO32_SYMBOL(base, name) \
12833 ({ \
12834 extern const char VDSO32_##name[]; \
12835 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12836 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12837 })
12838 #endif
12839
12840 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12841 index 1971e65..1e3559b 100644
12842 --- a/arch/x86/include/asm/x86_init.h
12843 +++ b/arch/x86/include/asm/x86_init.h
12844 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
12845 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12846 void (*find_smp_config)(void);
12847 void (*get_smp_config)(unsigned int early);
12848 -};
12849 +} __no_const;
12850
12851 /**
12852 * struct x86_init_resources - platform specific resource related ops
12853 @@ -42,7 +42,7 @@ struct x86_init_resources {
12854 void (*probe_roms)(void);
12855 void (*reserve_resources)(void);
12856 char *(*memory_setup)(void);
12857 -};
12858 +} __no_const;
12859
12860 /**
12861 * struct x86_init_irqs - platform specific interrupt setup
12862 @@ -55,7 +55,7 @@ struct x86_init_irqs {
12863 void (*pre_vector_init)(void);
12864 void (*intr_init)(void);
12865 void (*trap_init)(void);
12866 -};
12867 +} __no_const;
12868
12869 /**
12870 * struct x86_init_oem - oem platform specific customizing functions
12871 @@ -65,7 +65,7 @@ struct x86_init_irqs {
12872 struct x86_init_oem {
12873 void (*arch_setup)(void);
12874 void (*banner)(void);
12875 -};
12876 +} __no_const;
12877
12878 /**
12879 * struct x86_init_mapping - platform specific initial kernel pagetable setup
12880 @@ -76,7 +76,7 @@ struct x86_init_oem {
12881 */
12882 struct x86_init_mapping {
12883 void (*pagetable_reserve)(u64 start, u64 end);
12884 -};
12885 +} __no_const;
12886
12887 /**
12888 * struct x86_init_paging - platform specific paging functions
12889 @@ -86,7 +86,7 @@ struct x86_init_mapping {
12890 struct x86_init_paging {
12891 void (*pagetable_setup_start)(pgd_t *base);
12892 void (*pagetable_setup_done)(pgd_t *base);
12893 -};
12894 +} __no_const;
12895
12896 /**
12897 * struct x86_init_timers - platform specific timer setup
12898 @@ -101,7 +101,7 @@ struct x86_init_timers {
12899 void (*tsc_pre_init)(void);
12900 void (*timer_init)(void);
12901 void (*wallclock_init)(void);
12902 -};
12903 +} __no_const;
12904
12905 /**
12906 * struct x86_init_iommu - platform specific iommu setup
12907 @@ -109,7 +109,7 @@ struct x86_init_timers {
12908 */
12909 struct x86_init_iommu {
12910 int (*iommu_init)(void);
12911 -};
12912 +} __no_const;
12913
12914 /**
12915 * struct x86_init_pci - platform specific pci init functions
12916 @@ -123,7 +123,7 @@ struct x86_init_pci {
12917 int (*init)(void);
12918 void (*init_irq)(void);
12919 void (*fixup_irqs)(void);
12920 -};
12921 +} __no_const;
12922
12923 /**
12924 * struct x86_init_ops - functions for platform specific setup
12925 @@ -139,7 +139,7 @@ struct x86_init_ops {
12926 struct x86_init_timers timers;
12927 struct x86_init_iommu iommu;
12928 struct x86_init_pci pci;
12929 -};
12930 +} __no_const;
12931
12932 /**
12933 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
12934 @@ -147,7 +147,7 @@ struct x86_init_ops {
12935 */
12936 struct x86_cpuinit_ops {
12937 void (*setup_percpu_clockev)(void);
12938 -};
12939 +} __no_const;
12940
12941 /**
12942 * struct x86_platform_ops - platform specific runtime functions
12943 @@ -169,7 +169,7 @@ struct x86_platform_ops {
12944 void (*nmi_init)(void);
12945 unsigned char (*get_nmi_reason)(void);
12946 int (*i8042_detect)(void);
12947 -};
12948 +} __no_const;
12949
12950 struct pci_dev;
12951
12952 @@ -177,7 +177,7 @@ struct x86_msi_ops {
12953 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
12954 void (*teardown_msi_irq)(unsigned int irq);
12955 void (*teardown_msi_irqs)(struct pci_dev *dev);
12956 -};
12957 +} __no_const;
12958
12959 extern struct x86_init_ops x86_init;
12960 extern struct x86_cpuinit_ops x86_cpuinit;
12961 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
12962 index c6ce245..ffbdab7 100644
12963 --- a/arch/x86/include/asm/xsave.h
12964 +++ b/arch/x86/include/asm/xsave.h
12965 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12966 {
12967 int err;
12968
12969 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12970 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
12971 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
12972 +#endif
12973 +
12974 /*
12975 * Clear the xsave header first, so that reserved fields are
12976 * initialized to zero.
12977 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12978 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
12979 {
12980 int err;
12981 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
12982 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
12983 u32 lmask = mask;
12984 u32 hmask = mask >> 32;
12985
12986 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12987 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
12988 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
12989 +#endif
12990 +
12991 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
12992 "2:\n"
12993 ".section .fixup,\"ax\"\n"
12994 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
12995 index 6a564ac..9b1340c 100644
12996 --- a/arch/x86/kernel/acpi/realmode/Makefile
12997 +++ b/arch/x86/kernel/acpi/realmode/Makefile
12998 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
12999 $(call cc-option, -fno-stack-protector) \
13000 $(call cc-option, -mpreferred-stack-boundary=2)
13001 KBUILD_CFLAGS += $(call cc-option, -m32)
13002 +ifdef CONSTIFY_PLUGIN
13003 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13004 +endif
13005 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13006 GCOV_PROFILE := n
13007
13008 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13009 index b4fd836..4358fe3 100644
13010 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13011 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13012 @@ -108,6 +108,9 @@ wakeup_code:
13013 /* Do any other stuff... */
13014
13015 #ifndef CONFIG_64BIT
13016 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13017 + call verify_cpu
13018 +
13019 /* This could also be done in C code... */
13020 movl pmode_cr3, %eax
13021 movl %eax, %cr3
13022 @@ -131,6 +134,7 @@ wakeup_code:
13023 movl pmode_cr0, %eax
13024 movl %eax, %cr0
13025 jmp pmode_return
13026 +# include "../../verify_cpu.S"
13027 #else
13028 pushw $0
13029 pushw trampoline_segment
13030 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13031 index 103b6ab..2004d0a 100644
13032 --- a/arch/x86/kernel/acpi/sleep.c
13033 +++ b/arch/x86/kernel/acpi/sleep.c
13034 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13035 header->trampoline_segment = trampoline_address() >> 4;
13036 #ifdef CONFIG_SMP
13037 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13038 +
13039 + pax_open_kernel();
13040 early_gdt_descr.address =
13041 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13042 + pax_close_kernel();
13043 +
13044 initial_gs = per_cpu_offset(smp_processor_id());
13045 #endif
13046 initial_code = (unsigned long)wakeup_long64;
13047 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13048 index 13ab720..95d5442 100644
13049 --- a/arch/x86/kernel/acpi/wakeup_32.S
13050 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13051 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13052 # and restore the stack ... but you need gdt for this to work
13053 movl saved_context_esp, %esp
13054
13055 - movl %cs:saved_magic, %eax
13056 - cmpl $0x12345678, %eax
13057 + cmpl $0x12345678, saved_magic
13058 jne bogus_magic
13059
13060 # jump to place where we left off
13061 - movl saved_eip, %eax
13062 - jmp *%eax
13063 + jmp *(saved_eip)
13064
13065 bogus_magic:
13066 jmp bogus_magic
13067 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13068 index 1f84794..e23f862 100644
13069 --- a/arch/x86/kernel/alternative.c
13070 +++ b/arch/x86/kernel/alternative.c
13071 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13072 */
13073 for (a = start; a < end; a++) {
13074 instr = (u8 *)&a->instr_offset + a->instr_offset;
13075 +
13076 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13077 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13078 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13079 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13080 +#endif
13081 +
13082 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13083 BUG_ON(a->replacementlen > a->instrlen);
13084 BUG_ON(a->instrlen > sizeof(insnbuf));
13085 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13086 for (poff = start; poff < end; poff++) {
13087 u8 *ptr = (u8 *)poff + *poff;
13088
13089 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13090 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13091 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13092 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13093 +#endif
13094 +
13095 if (!*poff || ptr < text || ptr >= text_end)
13096 continue;
13097 /* turn DS segment override prefix into lock prefix */
13098 - if (*ptr == 0x3e)
13099 + if (*ktla_ktva(ptr) == 0x3e)
13100 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13101 };
13102 mutex_unlock(&text_mutex);
13103 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13104 for (poff = start; poff < end; poff++) {
13105 u8 *ptr = (u8 *)poff + *poff;
13106
13107 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13108 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13109 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13110 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13111 +#endif
13112 +
13113 if (!*poff || ptr < text || ptr >= text_end)
13114 continue;
13115 /* turn lock prefix into DS segment override prefix */
13116 - if (*ptr == 0xf0)
13117 + if (*ktla_ktva(ptr) == 0xf0)
13118 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13119 };
13120 mutex_unlock(&text_mutex);
13121 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13122
13123 BUG_ON(p->len > MAX_PATCH_LEN);
13124 /* prep the buffer with the original instructions */
13125 - memcpy(insnbuf, p->instr, p->len);
13126 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13127 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13128 (unsigned long)p->instr, p->len);
13129
13130 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13131 if (smp_alt_once)
13132 free_init_pages("SMP alternatives",
13133 (unsigned long)__smp_locks,
13134 - (unsigned long)__smp_locks_end);
13135 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13136
13137 restart_nmi();
13138 }
13139 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13140 * instructions. And on the local CPU you need to be protected again NMI or MCE
13141 * handlers seeing an inconsistent instruction while you patch.
13142 */
13143 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13144 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13145 size_t len)
13146 {
13147 unsigned long flags;
13148 local_irq_save(flags);
13149 - memcpy(addr, opcode, len);
13150 +
13151 + pax_open_kernel();
13152 + memcpy(ktla_ktva(addr), opcode, len);
13153 sync_core();
13154 + pax_close_kernel();
13155 +
13156 local_irq_restore(flags);
13157 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13158 that causes hangs on some VIA CPUs. */
13159 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13160 */
13161 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13162 {
13163 - unsigned long flags;
13164 - char *vaddr;
13165 + unsigned char *vaddr = ktla_ktva(addr);
13166 struct page *pages[2];
13167 - int i;
13168 + size_t i;
13169
13170 if (!core_kernel_text((unsigned long)addr)) {
13171 - pages[0] = vmalloc_to_page(addr);
13172 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13173 + pages[0] = vmalloc_to_page(vaddr);
13174 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13175 } else {
13176 - pages[0] = virt_to_page(addr);
13177 + pages[0] = virt_to_page(vaddr);
13178 WARN_ON(!PageReserved(pages[0]));
13179 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13180 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13181 }
13182 BUG_ON(!pages[0]);
13183 - local_irq_save(flags);
13184 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13185 - if (pages[1])
13186 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13187 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13188 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13189 - clear_fixmap(FIX_TEXT_POKE0);
13190 - if (pages[1])
13191 - clear_fixmap(FIX_TEXT_POKE1);
13192 - local_flush_tlb();
13193 - sync_core();
13194 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13195 - that causes hangs on some VIA CPUs. */
13196 + text_poke_early(addr, opcode, len);
13197 for (i = 0; i < len; i++)
13198 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13199 - local_irq_restore(flags);
13200 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13201 return addr;
13202 }
13203
13204 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13205 index f98d84c..e402a69 100644
13206 --- a/arch/x86/kernel/apic/apic.c
13207 +++ b/arch/x86/kernel/apic/apic.c
13208 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
13209 /*
13210 * Debug level, exported for io_apic.c
13211 */
13212 -unsigned int apic_verbosity;
13213 +int apic_verbosity;
13214
13215 int pic_mode;
13216
13217 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13218 apic_write(APIC_ESR, 0);
13219 v1 = apic_read(APIC_ESR);
13220 ack_APIC_irq();
13221 - atomic_inc(&irq_err_count);
13222 + atomic_inc_unchecked(&irq_err_count);
13223
13224 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13225 smp_processor_id(), v0 , v1);
13226 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13227 index 6d939d7..0697fcc 100644
13228 --- a/arch/x86/kernel/apic/io_apic.c
13229 +++ b/arch/x86/kernel/apic/io_apic.c
13230 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13231 }
13232 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13233
13234 -void lock_vector_lock(void)
13235 +void lock_vector_lock(void) __acquires(vector_lock)
13236 {
13237 /* Used to the online set of cpus does not change
13238 * during assign_irq_vector.
13239 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
13240 raw_spin_lock(&vector_lock);
13241 }
13242
13243 -void unlock_vector_lock(void)
13244 +void unlock_vector_lock(void) __releases(vector_lock)
13245 {
13246 raw_spin_unlock(&vector_lock);
13247 }
13248 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
13249 ack_APIC_irq();
13250 }
13251
13252 -atomic_t irq_mis_count;
13253 +atomic_unchecked_t irq_mis_count;
13254
13255 static void ack_apic_level(struct irq_data *data)
13256 {
13257 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
13258 * at the cpu.
13259 */
13260 if (!(v & (1 << (i & 0x1f)))) {
13261 - atomic_inc(&irq_mis_count);
13262 + atomic_inc_unchecked(&irq_mis_count);
13263
13264 eoi_ioapic_irq(irq, cfg);
13265 }
13266 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13267 index a46bd38..6b906d7 100644
13268 --- a/arch/x86/kernel/apm_32.c
13269 +++ b/arch/x86/kernel/apm_32.c
13270 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
13271 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13272 * even though they are called in protected mode.
13273 */
13274 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13275 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13276 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13277
13278 static const char driver_version[] = "1.16ac"; /* no spaces */
13279 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
13280 BUG_ON(cpu != 0);
13281 gdt = get_cpu_gdt_table(cpu);
13282 save_desc_40 = gdt[0x40 / 8];
13283 +
13284 + pax_open_kernel();
13285 gdt[0x40 / 8] = bad_bios_desc;
13286 + pax_close_kernel();
13287
13288 apm_irq_save(flags);
13289 APM_DO_SAVE_SEGS;
13290 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
13291 &call->esi);
13292 APM_DO_RESTORE_SEGS;
13293 apm_irq_restore(flags);
13294 +
13295 + pax_open_kernel();
13296 gdt[0x40 / 8] = save_desc_40;
13297 + pax_close_kernel();
13298 +
13299 put_cpu();
13300
13301 return call->eax & 0xff;
13302 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
13303 BUG_ON(cpu != 0);
13304 gdt = get_cpu_gdt_table(cpu);
13305 save_desc_40 = gdt[0x40 / 8];
13306 +
13307 + pax_open_kernel();
13308 gdt[0x40 / 8] = bad_bios_desc;
13309 + pax_close_kernel();
13310
13311 apm_irq_save(flags);
13312 APM_DO_SAVE_SEGS;
13313 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
13314 &call->eax);
13315 APM_DO_RESTORE_SEGS;
13316 apm_irq_restore(flags);
13317 +
13318 + pax_open_kernel();
13319 gdt[0x40 / 8] = save_desc_40;
13320 + pax_close_kernel();
13321 +
13322 put_cpu();
13323 return error;
13324 }
13325 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
13326 * code to that CPU.
13327 */
13328 gdt = get_cpu_gdt_table(0);
13329 +
13330 + pax_open_kernel();
13331 set_desc_base(&gdt[APM_CS >> 3],
13332 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13333 set_desc_base(&gdt[APM_CS_16 >> 3],
13334 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13335 set_desc_base(&gdt[APM_DS >> 3],
13336 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13337 + pax_close_kernel();
13338
13339 proc_create("apm", 0, NULL, &apm_file_ops);
13340
13341 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13342 index 4f13faf..87db5d2 100644
13343 --- a/arch/x86/kernel/asm-offsets.c
13344 +++ b/arch/x86/kernel/asm-offsets.c
13345 @@ -33,6 +33,8 @@ void common(void) {
13346 OFFSET(TI_status, thread_info, status);
13347 OFFSET(TI_addr_limit, thread_info, addr_limit);
13348 OFFSET(TI_preempt_count, thread_info, preempt_count);
13349 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13350 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13351
13352 BLANK();
13353 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13354 @@ -53,8 +55,26 @@ void common(void) {
13355 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13356 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13357 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13358 +
13359 +#ifdef CONFIG_PAX_KERNEXEC
13360 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13361 #endif
13362
13363 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13364 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13365 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13366 +#ifdef CONFIG_X86_64
13367 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13368 +#endif
13369 +#endif
13370 +
13371 +#endif
13372 +
13373 + BLANK();
13374 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13375 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13376 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13377 +
13378 #ifdef CONFIG_XEN
13379 BLANK();
13380 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13381 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13382 index e72a119..6e2955d 100644
13383 --- a/arch/x86/kernel/asm-offsets_64.c
13384 +++ b/arch/x86/kernel/asm-offsets_64.c
13385 @@ -69,6 +69,7 @@ int main(void)
13386 BLANK();
13387 #undef ENTRY
13388
13389 + DEFINE(TSS_size, sizeof(struct tss_struct));
13390 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13391 BLANK();
13392
13393 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13394 index 25f24dc..4094a7f 100644
13395 --- a/arch/x86/kernel/cpu/Makefile
13396 +++ b/arch/x86/kernel/cpu/Makefile
13397 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
13398 CFLAGS_REMOVE_perf_event.o = -pg
13399 endif
13400
13401 -# Make sure load_percpu_segment has no stackprotector
13402 -nostackp := $(call cc-option, -fno-stack-protector)
13403 -CFLAGS_common.o := $(nostackp)
13404 -
13405 obj-y := intel_cacheinfo.o scattered.o topology.o
13406 obj-y += proc.o capflags.o powerflags.o common.o
13407 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
13408 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13409 index 0bab2b1..d0a1bf8 100644
13410 --- a/arch/x86/kernel/cpu/amd.c
13411 +++ b/arch/x86/kernel/cpu/amd.c
13412 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13413 unsigned int size)
13414 {
13415 /* AMD errata T13 (order #21922) */
13416 - if ((c->x86 == 6)) {
13417 + if (c->x86 == 6) {
13418 /* Duron Rev A0 */
13419 if (c->x86_model == 3 && c->x86_mask == 0)
13420 size = 64;
13421 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13422 index aa003b1..47ea638 100644
13423 --- a/arch/x86/kernel/cpu/common.c
13424 +++ b/arch/x86/kernel/cpu/common.c
13425 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13426
13427 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13428
13429 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13430 -#ifdef CONFIG_X86_64
13431 - /*
13432 - * We need valid kernel segments for data and code in long mode too
13433 - * IRET will check the segment types kkeil 2000/10/28
13434 - * Also sysret mandates a special GDT layout
13435 - *
13436 - * TLS descriptors are currently at a different place compared to i386.
13437 - * Hopefully nobody expects them at a fixed place (Wine?)
13438 - */
13439 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13440 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13441 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13442 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13443 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13444 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13445 -#else
13446 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13447 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13448 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13449 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13450 - /*
13451 - * Segments used for calling PnP BIOS have byte granularity.
13452 - * They code segments and data segments have fixed 64k limits,
13453 - * the transfer segment sizes are set at run time.
13454 - */
13455 - /* 32-bit code */
13456 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13457 - /* 16-bit code */
13458 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13459 - /* 16-bit data */
13460 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13461 - /* 16-bit data */
13462 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13463 - /* 16-bit data */
13464 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13465 - /*
13466 - * The APM segments have byte granularity and their bases
13467 - * are set at run time. All have 64k limits.
13468 - */
13469 - /* 32-bit code */
13470 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13471 - /* 16-bit code */
13472 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13473 - /* data */
13474 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13475 -
13476 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13477 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13478 - GDT_STACK_CANARY_INIT
13479 -#endif
13480 -} };
13481 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13482 -
13483 static int __init x86_xsave_setup(char *s)
13484 {
13485 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13486 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
13487 {
13488 struct desc_ptr gdt_descr;
13489
13490 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13491 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13492 gdt_descr.size = GDT_SIZE - 1;
13493 load_gdt(&gdt_descr);
13494 /* Reload the per-cpu base */
13495 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13496 /* Filter out anything that depends on CPUID levels we don't have */
13497 filter_cpuid_features(c, true);
13498
13499 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13500 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13501 +#endif
13502 +
13503 /* If the model name is still unset, do table lookup. */
13504 if (!c->x86_model_id[0]) {
13505 const char *p;
13506 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
13507 }
13508 __setup("clearcpuid=", setup_disablecpuid);
13509
13510 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13511 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13512 +
13513 #ifdef CONFIG_X86_64
13514 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13515
13516 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13517 EXPORT_PER_CPU_SYMBOL(current_task);
13518
13519 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13520 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13521 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13522 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13523
13524 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13525 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13526 {
13527 memset(regs, 0, sizeof(struct pt_regs));
13528 regs->fs = __KERNEL_PERCPU;
13529 - regs->gs = __KERNEL_STACK_CANARY;
13530 + savesegment(gs, regs->gs);
13531
13532 return regs;
13533 }
13534 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
13535 int i;
13536
13537 cpu = stack_smp_processor_id();
13538 - t = &per_cpu(init_tss, cpu);
13539 + t = init_tss + cpu;
13540 oist = &per_cpu(orig_ist, cpu);
13541
13542 #ifdef CONFIG_NUMA
13543 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
13544 switch_to_new_gdt(cpu);
13545 loadsegment(fs, 0);
13546
13547 - load_idt((const struct desc_ptr *)&idt_descr);
13548 + load_idt(&idt_descr);
13549
13550 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13551 syscall_init();
13552 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
13553 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13554 barrier();
13555
13556 - x86_configure_nx();
13557 if (cpu != 0)
13558 enable_x2apic();
13559
13560 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
13561 {
13562 int cpu = smp_processor_id();
13563 struct task_struct *curr = current;
13564 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13565 + struct tss_struct *t = init_tss + cpu;
13566 struct thread_struct *thread = &curr->thread;
13567
13568 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13569 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13570 index 5231312..a78a987 100644
13571 --- a/arch/x86/kernel/cpu/intel.c
13572 +++ b/arch/x86/kernel/cpu/intel.c
13573 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13574 * Update the IDT descriptor and reload the IDT so that
13575 * it uses the read-only mapped virtual address.
13576 */
13577 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13578 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13579 load_idt(&idt_descr);
13580 }
13581 #endif
13582 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13583 index 2af127d..8ff7ac0 100644
13584 --- a/arch/x86/kernel/cpu/mcheck/mce.c
13585 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
13586 @@ -42,6 +42,7 @@
13587 #include <asm/processor.h>
13588 #include <asm/mce.h>
13589 #include <asm/msr.h>
13590 +#include <asm/local.h>
13591
13592 #include "mce-internal.h"
13593
13594 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
13595 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13596 m->cs, m->ip);
13597
13598 - if (m->cs == __KERNEL_CS)
13599 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13600 print_symbol("{%s}", m->ip);
13601 pr_cont("\n");
13602 }
13603 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
13604
13605 #define PANIC_TIMEOUT 5 /* 5 seconds */
13606
13607 -static atomic_t mce_paniced;
13608 +static atomic_unchecked_t mce_paniced;
13609
13610 static int fake_panic;
13611 -static atomic_t mce_fake_paniced;
13612 +static atomic_unchecked_t mce_fake_paniced;
13613
13614 /* Panic in progress. Enable interrupts and wait for final IPI */
13615 static void wait_for_panic(void)
13616 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13617 /*
13618 * Make sure only one CPU runs in machine check panic
13619 */
13620 - if (atomic_inc_return(&mce_paniced) > 1)
13621 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13622 wait_for_panic();
13623 barrier();
13624
13625 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13626 console_verbose();
13627 } else {
13628 /* Don't log too much for fake panic */
13629 - if (atomic_inc_return(&mce_fake_paniced) > 1)
13630 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13631 return;
13632 }
13633 /* First print corrected ones that are still unlogged */
13634 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
13635 * might have been modified by someone else.
13636 */
13637 rmb();
13638 - if (atomic_read(&mce_paniced))
13639 + if (atomic_read_unchecked(&mce_paniced))
13640 wait_for_panic();
13641 if (!monarch_timeout)
13642 goto out;
13643 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13644 }
13645
13646 /* Call the installed machine check handler for this CPU setup. */
13647 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
13648 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13649 unexpected_machine_check;
13650
13651 /*
13652 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13653 return;
13654 }
13655
13656 + pax_open_kernel();
13657 machine_check_vector = do_machine_check;
13658 + pax_close_kernel();
13659
13660 __mcheck_cpu_init_generic();
13661 __mcheck_cpu_init_vendor(c);
13662 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13663 */
13664
13665 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
13666 -static int mce_chrdev_open_count; /* #times opened */
13667 +static local_t mce_chrdev_open_count; /* #times opened */
13668 static int mce_chrdev_open_exclu; /* already open exclusive? */
13669
13670 static int mce_chrdev_open(struct inode *inode, struct file *file)
13671 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13672 spin_lock(&mce_chrdev_state_lock);
13673
13674 if (mce_chrdev_open_exclu ||
13675 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
13676 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
13677 spin_unlock(&mce_chrdev_state_lock);
13678
13679 return -EBUSY;
13680 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13681
13682 if (file->f_flags & O_EXCL)
13683 mce_chrdev_open_exclu = 1;
13684 - mce_chrdev_open_count++;
13685 + local_inc(&mce_chrdev_open_count);
13686
13687 spin_unlock(&mce_chrdev_state_lock);
13688
13689 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
13690 {
13691 spin_lock(&mce_chrdev_state_lock);
13692
13693 - mce_chrdev_open_count--;
13694 + local_dec(&mce_chrdev_open_count);
13695 mce_chrdev_open_exclu = 0;
13696
13697 spin_unlock(&mce_chrdev_state_lock);
13698 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
13699 static void mce_reset(void)
13700 {
13701 cpu_missing = 0;
13702 - atomic_set(&mce_fake_paniced, 0);
13703 + atomic_set_unchecked(&mce_fake_paniced, 0);
13704 atomic_set(&mce_executing, 0);
13705 atomic_set(&mce_callin, 0);
13706 atomic_set(&global_nwo, 0);
13707 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13708 index 5c0e653..0882b0a 100644
13709 --- a/arch/x86/kernel/cpu/mcheck/p5.c
13710 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
13711 @@ -12,6 +12,7 @@
13712 #include <asm/system.h>
13713 #include <asm/mce.h>
13714 #include <asm/msr.h>
13715 +#include <asm/pgtable.h>
13716
13717 /* By default disabled */
13718 int mce_p5_enabled __read_mostly;
13719 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13720 if (!cpu_has(c, X86_FEATURE_MCE))
13721 return;
13722
13723 + pax_open_kernel();
13724 machine_check_vector = pentium_machine_check;
13725 + pax_close_kernel();
13726 /* Make sure the vector pointer is visible before we enable MCEs: */
13727 wmb();
13728
13729 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13730 index 54060f5..c1a7577 100644
13731 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
13732 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13733 @@ -11,6 +11,7 @@
13734 #include <asm/system.h>
13735 #include <asm/mce.h>
13736 #include <asm/msr.h>
13737 +#include <asm/pgtable.h>
13738
13739 /* Machine check handler for WinChip C6: */
13740 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13741 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13742 {
13743 u32 lo, hi;
13744
13745 + pax_open_kernel();
13746 machine_check_vector = winchip_machine_check;
13747 + pax_close_kernel();
13748 /* Make sure the vector pointer is visible before we enable MCEs: */
13749 wmb();
13750
13751 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13752 index 6b96110..0da73eb 100644
13753 --- a/arch/x86/kernel/cpu/mtrr/main.c
13754 +++ b/arch/x86/kernel/cpu/mtrr/main.c
13755 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
13756 u64 size_or_mask, size_and_mask;
13757 static bool mtrr_aps_delayed_init;
13758
13759 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13760 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13761
13762 const struct mtrr_ops *mtrr_if;
13763
13764 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
13765 index df5e41f..816c719 100644
13766 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
13767 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
13768 @@ -25,7 +25,7 @@ struct mtrr_ops {
13769 int (*validate_add_page)(unsigned long base, unsigned long size,
13770 unsigned int type);
13771 int (*have_wrcomb)(void);
13772 -};
13773 +} __do_const;
13774
13775 extern int generic_get_free_region(unsigned long base, unsigned long size,
13776 int replace_reg);
13777 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
13778 index 2bda212..78cc605 100644
13779 --- a/arch/x86/kernel/cpu/perf_event.c
13780 +++ b/arch/x86/kernel/cpu/perf_event.c
13781 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
13782 break;
13783
13784 perf_callchain_store(entry, frame.return_address);
13785 - fp = frame.next_frame;
13786 + fp = (const void __force_user *)frame.next_frame;
13787 }
13788 }
13789
13790 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
13791 index 13ad899..f642b9a 100644
13792 --- a/arch/x86/kernel/crash.c
13793 +++ b/arch/x86/kernel/crash.c
13794 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
13795 {
13796 #ifdef CONFIG_X86_32
13797 struct pt_regs fixed_regs;
13798 -#endif
13799
13800 -#ifdef CONFIG_X86_32
13801 - if (!user_mode_vm(regs)) {
13802 + if (!user_mode(regs)) {
13803 crash_fixup_ss_esp(&fixed_regs, regs);
13804 regs = &fixed_regs;
13805 }
13806 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
13807 index 37250fe..bf2ec74 100644
13808 --- a/arch/x86/kernel/doublefault_32.c
13809 +++ b/arch/x86/kernel/doublefault_32.c
13810 @@ -11,7 +11,7 @@
13811
13812 #define DOUBLEFAULT_STACKSIZE (1024)
13813 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
13814 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
13815 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
13816
13817 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
13818
13819 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
13820 unsigned long gdt, tss;
13821
13822 store_gdt(&gdt_desc);
13823 - gdt = gdt_desc.address;
13824 + gdt = (unsigned long)gdt_desc.address;
13825
13826 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
13827
13828 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
13829 /* 0x2 bit is always set */
13830 .flags = X86_EFLAGS_SF | 0x2,
13831 .sp = STACK_START,
13832 - .es = __USER_DS,
13833 + .es = __KERNEL_DS,
13834 .cs = __KERNEL_CS,
13835 .ss = __KERNEL_DS,
13836 - .ds = __USER_DS,
13837 + .ds = __KERNEL_DS,
13838 .fs = __KERNEL_PERCPU,
13839
13840 .__cr3 = __pa_nodebug(swapper_pg_dir),
13841 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
13842 index 1aae78f..aab3a3d 100644
13843 --- a/arch/x86/kernel/dumpstack.c
13844 +++ b/arch/x86/kernel/dumpstack.c
13845 @@ -2,6 +2,9 @@
13846 * Copyright (C) 1991, 1992 Linus Torvalds
13847 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
13848 */
13849 +#ifdef CONFIG_GRKERNSEC_HIDESYM
13850 +#define __INCLUDED_BY_HIDESYM 1
13851 +#endif
13852 #include <linux/kallsyms.h>
13853 #include <linux/kprobes.h>
13854 #include <linux/uaccess.h>
13855 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
13856 static void
13857 print_ftrace_graph_addr(unsigned long addr, void *data,
13858 const struct stacktrace_ops *ops,
13859 - struct thread_info *tinfo, int *graph)
13860 + struct task_struct *task, int *graph)
13861 {
13862 - struct task_struct *task = tinfo->task;
13863 unsigned long ret_addr;
13864 int index = task->curr_ret_stack;
13865
13866 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13867 static inline void
13868 print_ftrace_graph_addr(unsigned long addr, void *data,
13869 const struct stacktrace_ops *ops,
13870 - struct thread_info *tinfo, int *graph)
13871 + struct task_struct *task, int *graph)
13872 { }
13873 #endif
13874
13875 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13876 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
13877 */
13878
13879 -static inline int valid_stack_ptr(struct thread_info *tinfo,
13880 - void *p, unsigned int size, void *end)
13881 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
13882 {
13883 - void *t = tinfo;
13884 if (end) {
13885 if (p < end && p >= (end-THREAD_SIZE))
13886 return 1;
13887 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
13888 }
13889
13890 unsigned long
13891 -print_context_stack(struct thread_info *tinfo,
13892 +print_context_stack(struct task_struct *task, void *stack_start,
13893 unsigned long *stack, unsigned long bp,
13894 const struct stacktrace_ops *ops, void *data,
13895 unsigned long *end, int *graph)
13896 {
13897 struct stack_frame *frame = (struct stack_frame *)bp;
13898
13899 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
13900 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
13901 unsigned long addr;
13902
13903 addr = *stack;
13904 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
13905 } else {
13906 ops->address(data, addr, 0);
13907 }
13908 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13909 + print_ftrace_graph_addr(addr, data, ops, task, graph);
13910 }
13911 stack++;
13912 }
13913 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
13914 EXPORT_SYMBOL_GPL(print_context_stack);
13915
13916 unsigned long
13917 -print_context_stack_bp(struct thread_info *tinfo,
13918 +print_context_stack_bp(struct task_struct *task, void *stack_start,
13919 unsigned long *stack, unsigned long bp,
13920 const struct stacktrace_ops *ops, void *data,
13921 unsigned long *end, int *graph)
13922 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13923 struct stack_frame *frame = (struct stack_frame *)bp;
13924 unsigned long *ret_addr = &frame->return_address;
13925
13926 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
13927 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
13928 unsigned long addr = *ret_addr;
13929
13930 if (!__kernel_text_address(addr))
13931 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13932 ops->address(data, addr, 1);
13933 frame = frame->next_frame;
13934 ret_addr = &frame->return_address;
13935 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13936 + print_ftrace_graph_addr(addr, data, ops, task, graph);
13937 }
13938
13939 return (unsigned long)frame;
13940 @@ -186,7 +186,7 @@ void dump_stack(void)
13941
13942 bp = stack_frame(current, NULL);
13943 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13944 - current->pid, current->comm, print_tainted(),
13945 + task_pid_nr(current), current->comm, print_tainted(),
13946 init_utsname()->release,
13947 (int)strcspn(init_utsname()->version, " "),
13948 init_utsname()->version);
13949 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
13950 }
13951 EXPORT_SYMBOL_GPL(oops_begin);
13952
13953 +extern void gr_handle_kernel_exploit(void);
13954 +
13955 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13956 {
13957 if (regs && kexec_should_crash(current))
13958 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13959 panic("Fatal exception in interrupt");
13960 if (panic_on_oops)
13961 panic("Fatal exception");
13962 - do_exit(signr);
13963 +
13964 + gr_handle_kernel_exploit();
13965 +
13966 + do_group_exit(signr);
13967 }
13968
13969 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13970 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13971
13972 show_registers(regs);
13973 #ifdef CONFIG_X86_32
13974 - if (user_mode_vm(regs)) {
13975 + if (user_mode(regs)) {
13976 sp = regs->sp;
13977 ss = regs->ss & 0xffff;
13978 } else {
13979 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
13980 unsigned long flags = oops_begin();
13981 int sig = SIGSEGV;
13982
13983 - if (!user_mode_vm(regs))
13984 + if (!user_mode(regs))
13985 report_bug(regs->ip, regs);
13986
13987 if (__die(str, regs, err))
13988 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
13989 index c99f9ed..2a15d80 100644
13990 --- a/arch/x86/kernel/dumpstack_32.c
13991 +++ b/arch/x86/kernel/dumpstack_32.c
13992 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
13993 bp = stack_frame(task, regs);
13994
13995 for (;;) {
13996 - struct thread_info *context;
13997 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
13998
13999 - context = (struct thread_info *)
14000 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14001 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14002 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14003
14004 - stack = (unsigned long *)context->previous_esp;
14005 - if (!stack)
14006 + if (stack_start == task_stack_page(task))
14007 break;
14008 + stack = *(unsigned long **)stack_start;
14009 if (ops->stack(data, "IRQ") < 0)
14010 break;
14011 touch_nmi_watchdog();
14012 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14013 * When in-kernel, we also print out the stack and code at the
14014 * time of the fault..
14015 */
14016 - if (!user_mode_vm(regs)) {
14017 + if (!user_mode(regs)) {
14018 unsigned int code_prologue = code_bytes * 43 / 64;
14019 unsigned int code_len = code_bytes;
14020 unsigned char c;
14021 u8 *ip;
14022 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14023
14024 printk(KERN_EMERG "Stack:\n");
14025 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14026
14027 printk(KERN_EMERG "Code: ");
14028
14029 - ip = (u8 *)regs->ip - code_prologue;
14030 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14031 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14032 /* try starting at IP */
14033 - ip = (u8 *)regs->ip;
14034 + ip = (u8 *)regs->ip + cs_base;
14035 code_len = code_len - code_prologue + 1;
14036 }
14037 for (i = 0; i < code_len; i++, ip++) {
14038 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14039 printk(KERN_CONT " Bad EIP value.");
14040 break;
14041 }
14042 - if (ip == (u8 *)regs->ip)
14043 + if (ip == (u8 *)regs->ip + cs_base)
14044 printk(KERN_CONT "<%02x> ", c);
14045 else
14046 printk(KERN_CONT "%02x ", c);
14047 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14048 {
14049 unsigned short ud2;
14050
14051 + ip = ktla_ktva(ip);
14052 if (ip < PAGE_OFFSET)
14053 return 0;
14054 if (probe_kernel_address((unsigned short *)ip, ud2))
14055 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14056
14057 return ud2 == 0x0b0f;
14058 }
14059 +
14060 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14061 +void pax_check_alloca(unsigned long size)
14062 +{
14063 + unsigned long sp = (unsigned long)&sp, stack_left;
14064 +
14065 + /* all kernel stacks are of the same size */
14066 + stack_left = sp & (THREAD_SIZE - 1);
14067 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14068 +}
14069 +EXPORT_SYMBOL(pax_check_alloca);
14070 +#endif
14071 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14072 index 6d728d9..279514e 100644
14073 --- a/arch/x86/kernel/dumpstack_64.c
14074 +++ b/arch/x86/kernel/dumpstack_64.c
14075 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14076 unsigned long *irq_stack_end =
14077 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14078 unsigned used = 0;
14079 - struct thread_info *tinfo;
14080 int graph = 0;
14081 unsigned long dummy;
14082 + void *stack_start;
14083
14084 if (!task)
14085 task = current;
14086 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14087 * current stack address. If the stacks consist of nested
14088 * exceptions
14089 */
14090 - tinfo = task_thread_info(task);
14091 for (;;) {
14092 char *id;
14093 unsigned long *estack_end;
14094 +
14095 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14096 &used, &id);
14097
14098 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14099 if (ops->stack(data, id) < 0)
14100 break;
14101
14102 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14103 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14104 data, estack_end, &graph);
14105 ops->stack(data, "<EOE>");
14106 /*
14107 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14108 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14109 if (ops->stack(data, "IRQ") < 0)
14110 break;
14111 - bp = ops->walk_stack(tinfo, stack, bp,
14112 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14113 ops, data, irq_stack_end, &graph);
14114 /*
14115 * We link to the next stack (which would be
14116 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14117 /*
14118 * This handles the process stack:
14119 */
14120 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14121 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14122 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14123 put_cpu();
14124 }
14125 EXPORT_SYMBOL(dump_trace);
14126 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14127
14128 return ud2 == 0x0b0f;
14129 }
14130 +
14131 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14132 +void pax_check_alloca(unsigned long size)
14133 +{
14134 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14135 + unsigned cpu, used;
14136 + char *id;
14137 +
14138 + /* check the process stack first */
14139 + stack_start = (unsigned long)task_stack_page(current);
14140 + stack_end = stack_start + THREAD_SIZE;
14141 + if (likely(stack_start <= sp && sp < stack_end)) {
14142 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14143 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14144 + return;
14145 + }
14146 +
14147 + cpu = get_cpu();
14148 +
14149 + /* check the irq stacks */
14150 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14151 + stack_start = stack_end - IRQ_STACK_SIZE;
14152 + if (stack_start <= sp && sp < stack_end) {
14153 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14154 + put_cpu();
14155 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14156 + return;
14157 + }
14158 +
14159 + /* check the exception stacks */
14160 + used = 0;
14161 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14162 + stack_start = stack_end - EXCEPTION_STKSZ;
14163 + if (stack_end && stack_start <= sp && sp < stack_end) {
14164 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14165 + put_cpu();
14166 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14167 + return;
14168 + }
14169 +
14170 + put_cpu();
14171 +
14172 + /* unknown stack */
14173 + BUG();
14174 +}
14175 +EXPORT_SYMBOL(pax_check_alloca);
14176 +#endif
14177 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14178 index cd28a35..c72ed9a 100644
14179 --- a/arch/x86/kernel/early_printk.c
14180 +++ b/arch/x86/kernel/early_printk.c
14181 @@ -7,6 +7,7 @@
14182 #include <linux/pci_regs.h>
14183 #include <linux/pci_ids.h>
14184 #include <linux/errno.h>
14185 +#include <linux/sched.h>
14186 #include <asm/io.h>
14187 #include <asm/processor.h>
14188 #include <asm/fcntl.h>
14189 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14190 index f3f6f53..0841b66 100644
14191 --- a/arch/x86/kernel/entry_32.S
14192 +++ b/arch/x86/kernel/entry_32.S
14193 @@ -186,13 +186,146 @@
14194 /*CFI_REL_OFFSET gs, PT_GS*/
14195 .endm
14196 .macro SET_KERNEL_GS reg
14197 +
14198 +#ifdef CONFIG_CC_STACKPROTECTOR
14199 movl $(__KERNEL_STACK_CANARY), \reg
14200 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14201 + movl $(__USER_DS), \reg
14202 +#else
14203 + xorl \reg, \reg
14204 +#endif
14205 +
14206 movl \reg, %gs
14207 .endm
14208
14209 #endif /* CONFIG_X86_32_LAZY_GS */
14210
14211 -.macro SAVE_ALL
14212 +.macro pax_enter_kernel
14213 +#ifdef CONFIG_PAX_KERNEXEC
14214 + call pax_enter_kernel
14215 +#endif
14216 +.endm
14217 +
14218 +.macro pax_exit_kernel
14219 +#ifdef CONFIG_PAX_KERNEXEC
14220 + call pax_exit_kernel
14221 +#endif
14222 +.endm
14223 +
14224 +#ifdef CONFIG_PAX_KERNEXEC
14225 +ENTRY(pax_enter_kernel)
14226 +#ifdef CONFIG_PARAVIRT
14227 + pushl %eax
14228 + pushl %ecx
14229 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14230 + mov %eax, %esi
14231 +#else
14232 + mov %cr0, %esi
14233 +#endif
14234 + bts $16, %esi
14235 + jnc 1f
14236 + mov %cs, %esi
14237 + cmp $__KERNEL_CS, %esi
14238 + jz 3f
14239 + ljmp $__KERNEL_CS, $3f
14240 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14241 +2:
14242 +#ifdef CONFIG_PARAVIRT
14243 + mov %esi, %eax
14244 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14245 +#else
14246 + mov %esi, %cr0
14247 +#endif
14248 +3:
14249 +#ifdef CONFIG_PARAVIRT
14250 + popl %ecx
14251 + popl %eax
14252 +#endif
14253 + ret
14254 +ENDPROC(pax_enter_kernel)
14255 +
14256 +ENTRY(pax_exit_kernel)
14257 +#ifdef CONFIG_PARAVIRT
14258 + pushl %eax
14259 + pushl %ecx
14260 +#endif
14261 + mov %cs, %esi
14262 + cmp $__KERNEXEC_KERNEL_CS, %esi
14263 + jnz 2f
14264 +#ifdef CONFIG_PARAVIRT
14265 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14266 + mov %eax, %esi
14267 +#else
14268 + mov %cr0, %esi
14269 +#endif
14270 + btr $16, %esi
14271 + ljmp $__KERNEL_CS, $1f
14272 +1:
14273 +#ifdef CONFIG_PARAVIRT
14274 + mov %esi, %eax
14275 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14276 +#else
14277 + mov %esi, %cr0
14278 +#endif
14279 +2:
14280 +#ifdef CONFIG_PARAVIRT
14281 + popl %ecx
14282 + popl %eax
14283 +#endif
14284 + ret
14285 +ENDPROC(pax_exit_kernel)
14286 +#endif
14287 +
14288 +.macro pax_erase_kstack
14289 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14290 + call pax_erase_kstack
14291 +#endif
14292 +.endm
14293 +
14294 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14295 +/*
14296 + * ebp: thread_info
14297 + * ecx, edx: can be clobbered
14298 + */
14299 +ENTRY(pax_erase_kstack)
14300 + pushl %edi
14301 + pushl %eax
14302 +
14303 + mov TI_lowest_stack(%ebp), %edi
14304 + mov $-0xBEEF, %eax
14305 + std
14306 +
14307 +1: mov %edi, %ecx
14308 + and $THREAD_SIZE_asm - 1, %ecx
14309 + shr $2, %ecx
14310 + repne scasl
14311 + jecxz 2f
14312 +
14313 + cmp $2*16, %ecx
14314 + jc 2f
14315 +
14316 + mov $2*16, %ecx
14317 + repe scasl
14318 + jecxz 2f
14319 + jne 1b
14320 +
14321 +2: cld
14322 + mov %esp, %ecx
14323 + sub %edi, %ecx
14324 + shr $2, %ecx
14325 + rep stosl
14326 +
14327 + mov TI_task_thread_sp0(%ebp), %edi
14328 + sub $128, %edi
14329 + mov %edi, TI_lowest_stack(%ebp)
14330 +
14331 + popl %eax
14332 + popl %edi
14333 + ret
14334 +ENDPROC(pax_erase_kstack)
14335 +#endif
14336 +
14337 +.macro __SAVE_ALL _DS
14338 cld
14339 PUSH_GS
14340 pushl_cfi %fs
14341 @@ -215,7 +348,7 @@
14342 CFI_REL_OFFSET ecx, 0
14343 pushl_cfi %ebx
14344 CFI_REL_OFFSET ebx, 0
14345 - movl $(__USER_DS), %edx
14346 + movl $\_DS, %edx
14347 movl %edx, %ds
14348 movl %edx, %es
14349 movl $(__KERNEL_PERCPU), %edx
14350 @@ -223,6 +356,15 @@
14351 SET_KERNEL_GS %edx
14352 .endm
14353
14354 +.macro SAVE_ALL
14355 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14356 + __SAVE_ALL __KERNEL_DS
14357 + pax_enter_kernel
14358 +#else
14359 + __SAVE_ALL __USER_DS
14360 +#endif
14361 +.endm
14362 +
14363 .macro RESTORE_INT_REGS
14364 popl_cfi %ebx
14365 CFI_RESTORE ebx
14366 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
14367 popfl_cfi
14368 jmp syscall_exit
14369 CFI_ENDPROC
14370 -END(ret_from_fork)
14371 +ENDPROC(ret_from_fork)
14372
14373 /*
14374 * Interrupt exit functions should be protected against kprobes
14375 @@ -333,7 +475,15 @@ check_userspace:
14376 movb PT_CS(%esp), %al
14377 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
14378 cmpl $USER_RPL, %eax
14379 +
14380 +#ifdef CONFIG_PAX_KERNEXEC
14381 + jae resume_userspace
14382 +
14383 + PAX_EXIT_KERNEL
14384 + jmp resume_kernel
14385 +#else
14386 jb resume_kernel # not returning to v8086 or userspace
14387 +#endif
14388
14389 ENTRY(resume_userspace)
14390 LOCKDEP_SYS_EXIT
14391 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
14392 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
14393 # int/exception return?
14394 jne work_pending
14395 - jmp restore_all
14396 -END(ret_from_exception)
14397 + jmp restore_all_pax
14398 +ENDPROC(ret_from_exception)
14399
14400 #ifdef CONFIG_PREEMPT
14401 ENTRY(resume_kernel)
14402 @@ -361,7 +511,7 @@ need_resched:
14403 jz restore_all
14404 call preempt_schedule_irq
14405 jmp need_resched
14406 -END(resume_kernel)
14407 +ENDPROC(resume_kernel)
14408 #endif
14409 CFI_ENDPROC
14410 /*
14411 @@ -395,23 +545,34 @@ sysenter_past_esp:
14412 /*CFI_REL_OFFSET cs, 0*/
14413 /*
14414 * Push current_thread_info()->sysenter_return to the stack.
14415 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
14416 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
14417 */
14418 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
14419 + pushl_cfi $0
14420 CFI_REL_OFFSET eip, 0
14421
14422 pushl_cfi %eax
14423 SAVE_ALL
14424 + GET_THREAD_INFO(%ebp)
14425 + movl TI_sysenter_return(%ebp),%ebp
14426 + movl %ebp,PT_EIP(%esp)
14427 ENABLE_INTERRUPTS(CLBR_NONE)
14428
14429 /*
14430 * Load the potential sixth argument from user stack.
14431 * Careful about security.
14432 */
14433 + movl PT_OLDESP(%esp),%ebp
14434 +
14435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14436 + mov PT_OLDSS(%esp),%ds
14437 +1: movl %ds:(%ebp),%ebp
14438 + push %ss
14439 + pop %ds
14440 +#else
14441 cmpl $__PAGE_OFFSET-3,%ebp
14442 jae syscall_fault
14443 1: movl (%ebp),%ebp
14444 +#endif
14445 +
14446 movl %ebp,PT_EBP(%esp)
14447 .section __ex_table,"a"
14448 .align 4
14449 @@ -434,12 +595,24 @@ sysenter_do_call:
14450 testl $_TIF_ALLWORK_MASK, %ecx
14451 jne sysexit_audit
14452 sysenter_exit:
14453 +
14454 +#ifdef CONFIG_PAX_RANDKSTACK
14455 + pushl_cfi %eax
14456 + movl %esp, %eax
14457 + call pax_randomize_kstack
14458 + popl_cfi %eax
14459 +#endif
14460 +
14461 + pax_erase_kstack
14462 +
14463 /* if something modifies registers it must also disable sysexit */
14464 movl PT_EIP(%esp), %edx
14465 movl PT_OLDESP(%esp), %ecx
14466 xorl %ebp,%ebp
14467 TRACE_IRQS_ON
14468 1: mov PT_FS(%esp), %fs
14469 +2: mov PT_DS(%esp), %ds
14470 +3: mov PT_ES(%esp), %es
14471 PTGS_TO_GS
14472 ENABLE_INTERRUPTS_SYSEXIT
14473
14474 @@ -456,6 +629,9 @@ sysenter_audit:
14475 movl %eax,%edx /* 2nd arg: syscall number */
14476 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
14477 call audit_syscall_entry
14478 +
14479 + pax_erase_kstack
14480 +
14481 pushl_cfi %ebx
14482 movl PT_EAX(%esp),%eax /* reload syscall number */
14483 jmp sysenter_do_call
14484 @@ -482,11 +658,17 @@ sysexit_audit:
14485
14486 CFI_ENDPROC
14487 .pushsection .fixup,"ax"
14488 -2: movl $0,PT_FS(%esp)
14489 +4: movl $0,PT_FS(%esp)
14490 + jmp 1b
14491 +5: movl $0,PT_DS(%esp)
14492 + jmp 1b
14493 +6: movl $0,PT_ES(%esp)
14494 jmp 1b
14495 .section __ex_table,"a"
14496 .align 4
14497 - .long 1b,2b
14498 + .long 1b,4b
14499 + .long 2b,5b
14500 + .long 3b,6b
14501 .popsection
14502 PTGS_TO_GS_EX
14503 ENDPROC(ia32_sysenter_target)
14504 @@ -519,6 +701,15 @@ syscall_exit:
14505 testl $_TIF_ALLWORK_MASK, %ecx # current->work
14506 jne syscall_exit_work
14507
14508 +restore_all_pax:
14509 +
14510 +#ifdef CONFIG_PAX_RANDKSTACK
14511 + movl %esp, %eax
14512 + call pax_randomize_kstack
14513 +#endif
14514 +
14515 + pax_erase_kstack
14516 +
14517 restore_all:
14518 TRACE_IRQS_IRET
14519 restore_all_notrace:
14520 @@ -578,14 +769,34 @@ ldt_ss:
14521 * compensating for the offset by changing to the ESPFIX segment with
14522 * a base address that matches for the difference.
14523 */
14524 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
14525 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
14526 mov %esp, %edx /* load kernel esp */
14527 mov PT_OLDESP(%esp), %eax /* load userspace esp */
14528 mov %dx, %ax /* eax: new kernel esp */
14529 sub %eax, %edx /* offset (low word is 0) */
14530 +#ifdef CONFIG_SMP
14531 + movl PER_CPU_VAR(cpu_number), %ebx
14532 + shll $PAGE_SHIFT_asm, %ebx
14533 + addl $cpu_gdt_table, %ebx
14534 +#else
14535 + movl $cpu_gdt_table, %ebx
14536 +#endif
14537 shr $16, %edx
14538 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
14539 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
14540 +
14541 +#ifdef CONFIG_PAX_KERNEXEC
14542 + mov %cr0, %esi
14543 + btr $16, %esi
14544 + mov %esi, %cr0
14545 +#endif
14546 +
14547 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
14548 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
14549 +
14550 +#ifdef CONFIG_PAX_KERNEXEC
14551 + bts $16, %esi
14552 + mov %esi, %cr0
14553 +#endif
14554 +
14555 pushl_cfi $__ESPFIX_SS
14556 pushl_cfi %eax /* new kernel esp */
14557 /* Disable interrupts, but do not irqtrace this section: we
14558 @@ -614,34 +825,28 @@ work_resched:
14559 movl TI_flags(%ebp), %ecx
14560 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
14561 # than syscall tracing?
14562 - jz restore_all
14563 + jz restore_all_pax
14564 testb $_TIF_NEED_RESCHED, %cl
14565 jnz work_resched
14566
14567 work_notifysig: # deal with pending signals and
14568 # notify-resume requests
14569 + movl %esp, %eax
14570 #ifdef CONFIG_VM86
14571 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
14572 - movl %esp, %eax
14573 - jne work_notifysig_v86 # returning to kernel-space or
14574 + jz 1f # returning to kernel-space or
14575 # vm86-space
14576 - xorl %edx, %edx
14577 - call do_notify_resume
14578 - jmp resume_userspace_sig
14579
14580 - ALIGN
14581 -work_notifysig_v86:
14582 pushl_cfi %ecx # save ti_flags for do_notify_resume
14583 call save_v86_state # %eax contains pt_regs pointer
14584 popl_cfi %ecx
14585 movl %eax, %esp
14586 -#else
14587 - movl %esp, %eax
14588 +1:
14589 #endif
14590 xorl %edx, %edx
14591 call do_notify_resume
14592 jmp resume_userspace_sig
14593 -END(work_pending)
14594 +ENDPROC(work_pending)
14595
14596 # perform syscall exit tracing
14597 ALIGN
14598 @@ -649,11 +854,14 @@ syscall_trace_entry:
14599 movl $-ENOSYS,PT_EAX(%esp)
14600 movl %esp, %eax
14601 call syscall_trace_enter
14602 +
14603 + pax_erase_kstack
14604 +
14605 /* What it returned is what we'll actually use. */
14606 cmpl $(nr_syscalls), %eax
14607 jnae syscall_call
14608 jmp syscall_exit
14609 -END(syscall_trace_entry)
14610 +ENDPROC(syscall_trace_entry)
14611
14612 # perform syscall exit tracing
14613 ALIGN
14614 @@ -666,20 +874,24 @@ syscall_exit_work:
14615 movl %esp, %eax
14616 call syscall_trace_leave
14617 jmp resume_userspace
14618 -END(syscall_exit_work)
14619 +ENDPROC(syscall_exit_work)
14620 CFI_ENDPROC
14621
14622 RING0_INT_FRAME # can't unwind into user space anyway
14623 syscall_fault:
14624 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14625 + push %ss
14626 + pop %ds
14627 +#endif
14628 GET_THREAD_INFO(%ebp)
14629 movl $-EFAULT,PT_EAX(%esp)
14630 jmp resume_userspace
14631 -END(syscall_fault)
14632 +ENDPROC(syscall_fault)
14633
14634 syscall_badsys:
14635 movl $-ENOSYS,PT_EAX(%esp)
14636 jmp resume_userspace
14637 -END(syscall_badsys)
14638 +ENDPROC(syscall_badsys)
14639 CFI_ENDPROC
14640 /*
14641 * End of kprobes section
14642 @@ -753,6 +965,36 @@ ptregs_clone:
14643 CFI_ENDPROC
14644 ENDPROC(ptregs_clone)
14645
14646 + ALIGN;
14647 +ENTRY(kernel_execve)
14648 + CFI_STARTPROC
14649 + pushl_cfi %ebp
14650 + sub $PT_OLDSS+4,%esp
14651 + pushl_cfi %edi
14652 + pushl_cfi %ecx
14653 + pushl_cfi %eax
14654 + lea 3*4(%esp),%edi
14655 + mov $PT_OLDSS/4+1,%ecx
14656 + xorl %eax,%eax
14657 + rep stosl
14658 + popl_cfi %eax
14659 + popl_cfi %ecx
14660 + popl_cfi %edi
14661 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
14662 + pushl_cfi %esp
14663 + call sys_execve
14664 + add $4,%esp
14665 + CFI_ADJUST_CFA_OFFSET -4
14666 + GET_THREAD_INFO(%ebp)
14667 + test %eax,%eax
14668 + jz syscall_exit
14669 + add $PT_OLDSS+4,%esp
14670 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
14671 + popl_cfi %ebp
14672 + ret
14673 + CFI_ENDPROC
14674 +ENDPROC(kernel_execve)
14675 +
14676 .macro FIXUP_ESPFIX_STACK
14677 /*
14678 * Switch back for ESPFIX stack to the normal zerobased stack
14679 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
14680 * normal stack and adjusts ESP with the matching offset.
14681 */
14682 /* fixup the stack */
14683 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
14684 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
14685 +#ifdef CONFIG_SMP
14686 + movl PER_CPU_VAR(cpu_number), %ebx
14687 + shll $PAGE_SHIFT_asm, %ebx
14688 + addl $cpu_gdt_table, %ebx
14689 +#else
14690 + movl $cpu_gdt_table, %ebx
14691 +#endif
14692 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
14693 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
14694 shl $16, %eax
14695 addl %esp, %eax /* the adjusted stack pointer */
14696 pushl_cfi $__KERNEL_DS
14697 @@ -816,7 +1065,7 @@ vector=vector+1
14698 .endr
14699 2: jmp common_interrupt
14700 .endr
14701 -END(irq_entries_start)
14702 +ENDPROC(irq_entries_start)
14703
14704 .previous
14705 END(interrupt)
14706 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
14707 pushl_cfi $do_coprocessor_error
14708 jmp error_code
14709 CFI_ENDPROC
14710 -END(coprocessor_error)
14711 +ENDPROC(coprocessor_error)
14712
14713 ENTRY(simd_coprocessor_error)
14714 RING0_INT_FRAME
14715 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
14716 #endif
14717 jmp error_code
14718 CFI_ENDPROC
14719 -END(simd_coprocessor_error)
14720 +ENDPROC(simd_coprocessor_error)
14721
14722 ENTRY(device_not_available)
14723 RING0_INT_FRAME
14724 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
14725 pushl_cfi $do_device_not_available
14726 jmp error_code
14727 CFI_ENDPROC
14728 -END(device_not_available)
14729 +ENDPROC(device_not_available)
14730
14731 #ifdef CONFIG_PARAVIRT
14732 ENTRY(native_iret)
14733 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
14734 .align 4
14735 .long native_iret, iret_exc
14736 .previous
14737 -END(native_iret)
14738 +ENDPROC(native_iret)
14739
14740 ENTRY(native_irq_enable_sysexit)
14741 sti
14742 sysexit
14743 -END(native_irq_enable_sysexit)
14744 +ENDPROC(native_irq_enable_sysexit)
14745 #endif
14746
14747 ENTRY(overflow)
14748 @@ -916,7 +1165,7 @@ ENTRY(overflow)
14749 pushl_cfi $do_overflow
14750 jmp error_code
14751 CFI_ENDPROC
14752 -END(overflow)
14753 +ENDPROC(overflow)
14754
14755 ENTRY(bounds)
14756 RING0_INT_FRAME
14757 @@ -924,7 +1173,7 @@ ENTRY(bounds)
14758 pushl_cfi $do_bounds
14759 jmp error_code
14760 CFI_ENDPROC
14761 -END(bounds)
14762 +ENDPROC(bounds)
14763
14764 ENTRY(invalid_op)
14765 RING0_INT_FRAME
14766 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
14767 pushl_cfi $do_invalid_op
14768 jmp error_code
14769 CFI_ENDPROC
14770 -END(invalid_op)
14771 +ENDPROC(invalid_op)
14772
14773 ENTRY(coprocessor_segment_overrun)
14774 RING0_INT_FRAME
14775 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
14776 pushl_cfi $do_coprocessor_segment_overrun
14777 jmp error_code
14778 CFI_ENDPROC
14779 -END(coprocessor_segment_overrun)
14780 +ENDPROC(coprocessor_segment_overrun)
14781
14782 ENTRY(invalid_TSS)
14783 RING0_EC_FRAME
14784 pushl_cfi $do_invalid_TSS
14785 jmp error_code
14786 CFI_ENDPROC
14787 -END(invalid_TSS)
14788 +ENDPROC(invalid_TSS)
14789
14790 ENTRY(segment_not_present)
14791 RING0_EC_FRAME
14792 pushl_cfi $do_segment_not_present
14793 jmp error_code
14794 CFI_ENDPROC
14795 -END(segment_not_present)
14796 +ENDPROC(segment_not_present)
14797
14798 ENTRY(stack_segment)
14799 RING0_EC_FRAME
14800 pushl_cfi $do_stack_segment
14801 jmp error_code
14802 CFI_ENDPROC
14803 -END(stack_segment)
14804 +ENDPROC(stack_segment)
14805
14806 ENTRY(alignment_check)
14807 RING0_EC_FRAME
14808 pushl_cfi $do_alignment_check
14809 jmp error_code
14810 CFI_ENDPROC
14811 -END(alignment_check)
14812 +ENDPROC(alignment_check)
14813
14814 ENTRY(divide_error)
14815 RING0_INT_FRAME
14816 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
14817 pushl_cfi $do_divide_error
14818 jmp error_code
14819 CFI_ENDPROC
14820 -END(divide_error)
14821 +ENDPROC(divide_error)
14822
14823 #ifdef CONFIG_X86_MCE
14824 ENTRY(machine_check)
14825 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
14826 pushl_cfi machine_check_vector
14827 jmp error_code
14828 CFI_ENDPROC
14829 -END(machine_check)
14830 +ENDPROC(machine_check)
14831 #endif
14832
14833 ENTRY(spurious_interrupt_bug)
14834 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
14835 pushl_cfi $do_spurious_interrupt_bug
14836 jmp error_code
14837 CFI_ENDPROC
14838 -END(spurious_interrupt_bug)
14839 +ENDPROC(spurious_interrupt_bug)
14840 /*
14841 * End of kprobes section
14842 */
14843 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
14844
14845 ENTRY(mcount)
14846 ret
14847 -END(mcount)
14848 +ENDPROC(mcount)
14849
14850 ENTRY(ftrace_caller)
14851 cmpl $0, function_trace_stop
14852 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
14853 .globl ftrace_stub
14854 ftrace_stub:
14855 ret
14856 -END(ftrace_caller)
14857 +ENDPROC(ftrace_caller)
14858
14859 #else /* ! CONFIG_DYNAMIC_FTRACE */
14860
14861 @@ -1174,7 +1423,7 @@ trace:
14862 popl %ecx
14863 popl %eax
14864 jmp ftrace_stub
14865 -END(mcount)
14866 +ENDPROC(mcount)
14867 #endif /* CONFIG_DYNAMIC_FTRACE */
14868 #endif /* CONFIG_FUNCTION_TRACER */
14869
14870 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
14871 popl %ecx
14872 popl %eax
14873 ret
14874 -END(ftrace_graph_caller)
14875 +ENDPROC(ftrace_graph_caller)
14876
14877 .globl return_to_handler
14878 return_to_handler:
14879 @@ -1209,7 +1458,6 @@ return_to_handler:
14880 jmp *%ecx
14881 #endif
14882
14883 -.section .rodata,"a"
14884 #include "syscall_table_32.S"
14885
14886 syscall_table_size=(.-sys_call_table)
14887 @@ -1255,15 +1503,18 @@ error_code:
14888 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
14889 REG_TO_PTGS %ecx
14890 SET_KERNEL_GS %ecx
14891 - movl $(__USER_DS), %ecx
14892 + movl $(__KERNEL_DS), %ecx
14893 movl %ecx, %ds
14894 movl %ecx, %es
14895 +
14896 + pax_enter_kernel
14897 +
14898 TRACE_IRQS_OFF
14899 movl %esp,%eax # pt_regs pointer
14900 call *%edi
14901 jmp ret_from_exception
14902 CFI_ENDPROC
14903 -END(page_fault)
14904 +ENDPROC(page_fault)
14905
14906 /*
14907 * Debug traps and NMI can happen at the one SYSENTER instruction
14908 @@ -1305,7 +1556,7 @@ debug_stack_correct:
14909 call do_debug
14910 jmp ret_from_exception
14911 CFI_ENDPROC
14912 -END(debug)
14913 +ENDPROC(debug)
14914
14915 /*
14916 * NMI is doubly nasty. It can happen _while_ we're handling
14917 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
14918 xorl %edx,%edx # zero error code
14919 movl %esp,%eax # pt_regs pointer
14920 call do_nmi
14921 +
14922 + pax_exit_kernel
14923 +
14924 jmp restore_all_notrace
14925 CFI_ENDPROC
14926
14927 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
14928 FIXUP_ESPFIX_STACK # %eax == %esp
14929 xorl %edx,%edx # zero error code
14930 call do_nmi
14931 +
14932 + pax_exit_kernel
14933 +
14934 RESTORE_REGS
14935 lss 12+4(%esp), %esp # back to espfix stack
14936 CFI_ADJUST_CFA_OFFSET -24
14937 jmp irq_return
14938 CFI_ENDPROC
14939 -END(nmi)
14940 +ENDPROC(nmi)
14941
14942 ENTRY(int3)
14943 RING0_INT_FRAME
14944 @@ -1395,14 +1652,14 @@ ENTRY(int3)
14945 call do_int3
14946 jmp ret_from_exception
14947 CFI_ENDPROC
14948 -END(int3)
14949 +ENDPROC(int3)
14950
14951 ENTRY(general_protection)
14952 RING0_EC_FRAME
14953 pushl_cfi $do_general_protection
14954 jmp error_code
14955 CFI_ENDPROC
14956 -END(general_protection)
14957 +ENDPROC(general_protection)
14958
14959 #ifdef CONFIG_KVM_GUEST
14960 ENTRY(async_page_fault)
14961 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
14962 pushl_cfi $do_async_page_fault
14963 jmp error_code
14964 CFI_ENDPROC
14965 -END(async_page_fault)
14966 +ENDPROC(async_page_fault)
14967 #endif
14968
14969 /*
14970 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
14971 index faf8d5e..4f16a68 100644
14972 --- a/arch/x86/kernel/entry_64.S
14973 +++ b/arch/x86/kernel/entry_64.S
14974 @@ -55,6 +55,8 @@
14975 #include <asm/paravirt.h>
14976 #include <asm/ftrace.h>
14977 #include <asm/percpu.h>
14978 +#include <asm/pgtable.h>
14979 +#include <asm/alternative-asm.h>
14980
14981 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14982 #include <linux/elf-em.h>
14983 @@ -68,8 +70,9 @@
14984 #ifdef CONFIG_FUNCTION_TRACER
14985 #ifdef CONFIG_DYNAMIC_FTRACE
14986 ENTRY(mcount)
14987 + pax_force_retaddr
14988 retq
14989 -END(mcount)
14990 +ENDPROC(mcount)
14991
14992 ENTRY(ftrace_caller)
14993 cmpl $0, function_trace_stop
14994 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
14995 #endif
14996
14997 GLOBAL(ftrace_stub)
14998 + pax_force_retaddr
14999 retq
15000 -END(ftrace_caller)
15001 +ENDPROC(ftrace_caller)
15002
15003 #else /* ! CONFIG_DYNAMIC_FTRACE */
15004 ENTRY(mcount)
15005 @@ -112,6 +116,7 @@ ENTRY(mcount)
15006 #endif
15007
15008 GLOBAL(ftrace_stub)
15009 + pax_force_retaddr
15010 retq
15011
15012 trace:
15013 @@ -121,12 +126,13 @@ trace:
15014 movq 8(%rbp), %rsi
15015 subq $MCOUNT_INSN_SIZE, %rdi
15016
15017 + pax_force_fptr ftrace_trace_function
15018 call *ftrace_trace_function
15019
15020 MCOUNT_RESTORE_FRAME
15021
15022 jmp ftrace_stub
15023 -END(mcount)
15024 +ENDPROC(mcount)
15025 #endif /* CONFIG_DYNAMIC_FTRACE */
15026 #endif /* CONFIG_FUNCTION_TRACER */
15027
15028 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
15029
15030 MCOUNT_RESTORE_FRAME
15031
15032 + pax_force_retaddr
15033 retq
15034 -END(ftrace_graph_caller)
15035 +ENDPROC(ftrace_graph_caller)
15036
15037 GLOBAL(return_to_handler)
15038 subq $24, %rsp
15039 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
15040 movq 8(%rsp), %rdx
15041 movq (%rsp), %rax
15042 addq $24, %rsp
15043 + pax_force_fptr %rdi
15044 jmp *%rdi
15045 #endif
15046
15047 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
15048 ENDPROC(native_usergs_sysret64)
15049 #endif /* CONFIG_PARAVIRT */
15050
15051 + .macro ljmpq sel, off
15052 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15053 + .byte 0x48; ljmp *1234f(%rip)
15054 + .pushsection .rodata
15055 + .align 16
15056 + 1234: .quad \off; .word \sel
15057 + .popsection
15058 +#else
15059 + pushq $\sel
15060 + pushq $\off
15061 + lretq
15062 +#endif
15063 + .endm
15064 +
15065 + .macro pax_enter_kernel
15066 + pax_set_fptr_mask
15067 +#ifdef CONFIG_PAX_KERNEXEC
15068 + call pax_enter_kernel
15069 +#endif
15070 + .endm
15071 +
15072 + .macro pax_exit_kernel
15073 +#ifdef CONFIG_PAX_KERNEXEC
15074 + call pax_exit_kernel
15075 +#endif
15076 + .endm
15077 +
15078 +#ifdef CONFIG_PAX_KERNEXEC
15079 +ENTRY(pax_enter_kernel)
15080 + pushq %rdi
15081 +
15082 +#ifdef CONFIG_PARAVIRT
15083 + PV_SAVE_REGS(CLBR_RDI)
15084 +#endif
15085 +
15086 + GET_CR0_INTO_RDI
15087 + bts $16,%rdi
15088 + jnc 3f
15089 + mov %cs,%edi
15090 + cmp $__KERNEL_CS,%edi
15091 + jnz 2f
15092 +1:
15093 +
15094 +#ifdef CONFIG_PARAVIRT
15095 + PV_RESTORE_REGS(CLBR_RDI)
15096 +#endif
15097 +
15098 + popq %rdi
15099 + pax_force_retaddr
15100 + retq
15101 +
15102 +2: ljmpq __KERNEL_CS,1f
15103 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15104 +4: SET_RDI_INTO_CR0
15105 + jmp 1b
15106 +ENDPROC(pax_enter_kernel)
15107 +
15108 +ENTRY(pax_exit_kernel)
15109 + pushq %rdi
15110 +
15111 +#ifdef CONFIG_PARAVIRT
15112 + PV_SAVE_REGS(CLBR_RDI)
15113 +#endif
15114 +
15115 + mov %cs,%rdi
15116 + cmp $__KERNEXEC_KERNEL_CS,%edi
15117 + jz 2f
15118 +1:
15119 +
15120 +#ifdef CONFIG_PARAVIRT
15121 + PV_RESTORE_REGS(CLBR_RDI);
15122 +#endif
15123 +
15124 + popq %rdi
15125 + pax_force_retaddr
15126 + retq
15127 +
15128 +2: GET_CR0_INTO_RDI
15129 + btr $16,%rdi
15130 + ljmpq __KERNEL_CS,3f
15131 +3: SET_RDI_INTO_CR0
15132 + jmp 1b
15133 +#ifdef CONFIG_PARAVIRT
15134 + PV_RESTORE_REGS(CLBR_RDI);
15135 +#endif
15136 +
15137 + popq %rdi
15138 + pax_force_retaddr
15139 + retq
15140 +ENDPROC(pax_exit_kernel)
15141 +#endif
15142 +
15143 + .macro pax_enter_kernel_user
15144 + pax_set_fptr_mask
15145 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15146 + call pax_enter_kernel_user
15147 +#endif
15148 + .endm
15149 +
15150 + .macro pax_exit_kernel_user
15151 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15152 + call pax_exit_kernel_user
15153 +#endif
15154 +#ifdef CONFIG_PAX_RANDKSTACK
15155 + pushq %rax
15156 + call pax_randomize_kstack
15157 + popq %rax
15158 +#endif
15159 + .endm
15160 +
15161 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15162 +ENTRY(pax_enter_kernel_user)
15163 + pushq %rdi
15164 + pushq %rbx
15165 +
15166 +#ifdef CONFIG_PARAVIRT
15167 + PV_SAVE_REGS(CLBR_RDI)
15168 +#endif
15169 +
15170 + GET_CR3_INTO_RDI
15171 + mov %rdi,%rbx
15172 + add $__START_KERNEL_map,%rbx
15173 + sub phys_base(%rip),%rbx
15174 +
15175 +#ifdef CONFIG_PARAVIRT
15176 + pushq %rdi
15177 + cmpl $0, pv_info+PARAVIRT_enabled
15178 + jz 1f
15179 + i = 0
15180 + .rept USER_PGD_PTRS
15181 + mov i*8(%rbx),%rsi
15182 + mov $0,%sil
15183 + lea i*8(%rbx),%rdi
15184 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15185 + i = i + 1
15186 + .endr
15187 + jmp 2f
15188 +1:
15189 +#endif
15190 +
15191 + i = 0
15192 + .rept USER_PGD_PTRS
15193 + movb $0,i*8(%rbx)
15194 + i = i + 1
15195 + .endr
15196 +
15197 +#ifdef CONFIG_PARAVIRT
15198 +2: popq %rdi
15199 +#endif
15200 + SET_RDI_INTO_CR3
15201 +
15202 +#ifdef CONFIG_PAX_KERNEXEC
15203 + GET_CR0_INTO_RDI
15204 + bts $16,%rdi
15205 + SET_RDI_INTO_CR0
15206 +#endif
15207 +
15208 +#ifdef CONFIG_PARAVIRT
15209 + PV_RESTORE_REGS(CLBR_RDI)
15210 +#endif
15211 +
15212 + popq %rbx
15213 + popq %rdi
15214 + pax_force_retaddr
15215 + retq
15216 +ENDPROC(pax_enter_kernel_user)
15217 +
15218 +ENTRY(pax_exit_kernel_user)
15219 + push %rdi
15220 +
15221 +#ifdef CONFIG_PARAVIRT
15222 + pushq %rbx
15223 + PV_SAVE_REGS(CLBR_RDI)
15224 +#endif
15225 +
15226 +#ifdef CONFIG_PAX_KERNEXEC
15227 + GET_CR0_INTO_RDI
15228 + btr $16,%rdi
15229 + SET_RDI_INTO_CR0
15230 +#endif
15231 +
15232 + GET_CR3_INTO_RDI
15233 + add $__START_KERNEL_map,%rdi
15234 + sub phys_base(%rip),%rdi
15235 +
15236 +#ifdef CONFIG_PARAVIRT
15237 + cmpl $0, pv_info+PARAVIRT_enabled
15238 + jz 1f
15239 + mov %rdi,%rbx
15240 + i = 0
15241 + .rept USER_PGD_PTRS
15242 + mov i*8(%rbx),%rsi
15243 + mov $0x67,%sil
15244 + lea i*8(%rbx),%rdi
15245 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15246 + i = i + 1
15247 + .endr
15248 + jmp 2f
15249 +1:
15250 +#endif
15251 +
15252 + i = 0
15253 + .rept USER_PGD_PTRS
15254 + movb $0x67,i*8(%rdi)
15255 + i = i + 1
15256 + .endr
15257 +
15258 +#ifdef CONFIG_PARAVIRT
15259 +2: PV_RESTORE_REGS(CLBR_RDI)
15260 + popq %rbx
15261 +#endif
15262 +
15263 + popq %rdi
15264 + pax_force_retaddr
15265 + retq
15266 +ENDPROC(pax_exit_kernel_user)
15267 +#endif
15268 +
15269 +.macro pax_erase_kstack
15270 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15271 + call pax_erase_kstack
15272 +#endif
15273 +.endm
15274 +
15275 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15276 +/*
15277 + * r11: thread_info
15278 + * rcx, rdx: can be clobbered
15279 + */
15280 +ENTRY(pax_erase_kstack)
15281 + pushq %rdi
15282 + pushq %rax
15283 + pushq %r11
15284 +
15285 + GET_THREAD_INFO(%r11)
15286 + mov TI_lowest_stack(%r11), %rdi
15287 + mov $-0xBEEF, %rax
15288 + std
15289 +
15290 +1: mov %edi, %ecx
15291 + and $THREAD_SIZE_asm - 1, %ecx
15292 + shr $3, %ecx
15293 + repne scasq
15294 + jecxz 2f
15295 +
15296 + cmp $2*8, %ecx
15297 + jc 2f
15298 +
15299 + mov $2*8, %ecx
15300 + repe scasq
15301 + jecxz 2f
15302 + jne 1b
15303 +
15304 +2: cld
15305 + mov %esp, %ecx
15306 + sub %edi, %ecx
15307 +
15308 + cmp $THREAD_SIZE_asm, %rcx
15309 + jb 3f
15310 + ud2
15311 +3:
15312 +
15313 + shr $3, %ecx
15314 + rep stosq
15315 +
15316 + mov TI_task_thread_sp0(%r11), %rdi
15317 + sub $256, %rdi
15318 + mov %rdi, TI_lowest_stack(%r11)
15319 +
15320 + popq %r11
15321 + popq %rax
15322 + popq %rdi
15323 + pax_force_retaddr
15324 + ret
15325 +ENDPROC(pax_erase_kstack)
15326 +#endif
15327
15328 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15329 #ifdef CONFIG_TRACE_IRQFLAGS
15330 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
15331 .endm
15332
15333 .macro UNFAKE_STACK_FRAME
15334 - addq $8*6, %rsp
15335 - CFI_ADJUST_CFA_OFFSET -(6*8)
15336 + addq $8*6 + ARG_SKIP, %rsp
15337 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15338 .endm
15339
15340 /*
15341 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
15342 movq %rsp, %rsi
15343
15344 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15345 - testl $3, CS(%rdi)
15346 + testb $3, CS(%rdi)
15347 je 1f
15348 SWAPGS
15349 /*
15350 @@ -355,9 +639,10 @@ ENTRY(save_rest)
15351 movq_cfi r15, R15+16
15352 movq %r11, 8(%rsp) /* return address */
15353 FIXUP_TOP_OF_STACK %r11, 16
15354 + pax_force_retaddr
15355 ret
15356 CFI_ENDPROC
15357 -END(save_rest)
15358 +ENDPROC(save_rest)
15359
15360 /* save complete stack frame */
15361 .pushsection .kprobes.text, "ax"
15362 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
15363 js 1f /* negative -> in kernel */
15364 SWAPGS
15365 xorl %ebx,%ebx
15366 -1: ret
15367 +1: pax_force_retaddr_bts
15368 + ret
15369 CFI_ENDPROC
15370 -END(save_paranoid)
15371 +ENDPROC(save_paranoid)
15372 .popsection
15373
15374 /*
15375 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
15376
15377 RESTORE_REST
15378
15379 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15380 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15381 je int_ret_from_sys_call
15382
15383 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15384 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
15385 jmp ret_from_sys_call # go to the SYSRET fastpath
15386
15387 CFI_ENDPROC
15388 -END(ret_from_fork)
15389 +ENDPROC(ret_from_fork)
15390
15391 /*
15392 * System call entry. Up to 6 arguments in registers are supported.
15393 @@ -456,7 +742,7 @@ END(ret_from_fork)
15394 ENTRY(system_call)
15395 CFI_STARTPROC simple
15396 CFI_SIGNAL_FRAME
15397 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15398 + CFI_DEF_CFA rsp,0
15399 CFI_REGISTER rip,rcx
15400 /*CFI_REGISTER rflags,r11*/
15401 SWAPGS_UNSAFE_STACK
15402 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
15403
15404 movq %rsp,PER_CPU_VAR(old_rsp)
15405 movq PER_CPU_VAR(kernel_stack),%rsp
15406 + SAVE_ARGS 8*6,0
15407 + pax_enter_kernel_user
15408 /*
15409 * No need to follow this irqs off/on section - it's straight
15410 * and short:
15411 */
15412 ENABLE_INTERRUPTS(CLBR_NONE)
15413 - SAVE_ARGS 8,0
15414 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15415 movq %rcx,RIP-ARGOFFSET(%rsp)
15416 CFI_REL_OFFSET rip,RIP-ARGOFFSET
15417 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
15418 system_call_fastpath:
15419 cmpq $__NR_syscall_max,%rax
15420 ja badsys
15421 - movq %r10,%rcx
15422 + movq R10-ARGOFFSET(%rsp),%rcx
15423 call *sys_call_table(,%rax,8) # XXX: rip relative
15424 movq %rax,RAX-ARGOFFSET(%rsp)
15425 /*
15426 @@ -503,6 +790,8 @@ sysret_check:
15427 andl %edi,%edx
15428 jnz sysret_careful
15429 CFI_REMEMBER_STATE
15430 + pax_exit_kernel_user
15431 + pax_erase_kstack
15432 /*
15433 * sysretq will re-enable interrupts:
15434 */
15435 @@ -554,14 +843,18 @@ badsys:
15436 * jump back to the normal fast path.
15437 */
15438 auditsys:
15439 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
15440 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
15441 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
15442 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
15443 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
15444 movq %rax,%rsi /* 2nd arg: syscall number */
15445 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
15446 call audit_syscall_entry
15447 +
15448 + pax_erase_kstack
15449 +
15450 LOAD_ARGS 0 /* reload call-clobbered registers */
15451 + pax_set_fptr_mask
15452 jmp system_call_fastpath
15453
15454 /*
15455 @@ -591,16 +884,20 @@ tracesys:
15456 FIXUP_TOP_OF_STACK %rdi
15457 movq %rsp,%rdi
15458 call syscall_trace_enter
15459 +
15460 + pax_erase_kstack
15461 +
15462 /*
15463 * Reload arg registers from stack in case ptrace changed them.
15464 * We don't reload %rax because syscall_trace_enter() returned
15465 * the value it wants us to use in the table lookup.
15466 */
15467 LOAD_ARGS ARGOFFSET, 1
15468 + pax_set_fptr_mask
15469 RESTORE_REST
15470 cmpq $__NR_syscall_max,%rax
15471 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
15472 - movq %r10,%rcx /* fixup for C */
15473 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
15474 call *sys_call_table(,%rax,8)
15475 movq %rax,RAX-ARGOFFSET(%rsp)
15476 /* Use IRET because user could have changed frame */
15477 @@ -612,7 +909,7 @@ tracesys:
15478 GLOBAL(int_ret_from_sys_call)
15479 DISABLE_INTERRUPTS(CLBR_NONE)
15480 TRACE_IRQS_OFF
15481 - testl $3,CS-ARGOFFSET(%rsp)
15482 + testb $3,CS-ARGOFFSET(%rsp)
15483 je retint_restore_args
15484 movl $_TIF_ALLWORK_MASK,%edi
15485 /* edi: mask to check */
15486 @@ -623,6 +920,7 @@ GLOBAL(int_with_check)
15487 andl %edi,%edx
15488 jnz int_careful
15489 andl $~TS_COMPAT,TI_status(%rcx)
15490 + pax_erase_kstack
15491 jmp retint_swapgs
15492
15493 /* Either reschedule or signal or syscall exit tracking needed. */
15494 @@ -669,7 +967,7 @@ int_restore_rest:
15495 TRACE_IRQS_OFF
15496 jmp int_with_check
15497 CFI_ENDPROC
15498 -END(system_call)
15499 +ENDPROC(system_call)
15500
15501 /*
15502 * Certain special system calls that need to save a complete full stack frame.
15503 @@ -685,7 +983,7 @@ ENTRY(\label)
15504 call \func
15505 jmp ptregscall_common
15506 CFI_ENDPROC
15507 -END(\label)
15508 +ENDPROC(\label)
15509 .endm
15510
15511 PTREGSCALL stub_clone, sys_clone, %r8
15512 @@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
15513 movq_cfi_restore R12+8, r12
15514 movq_cfi_restore RBP+8, rbp
15515 movq_cfi_restore RBX+8, rbx
15516 + pax_force_retaddr
15517 ret $REST_SKIP /* pop extended registers */
15518 CFI_ENDPROC
15519 -END(ptregscall_common)
15520 +ENDPROC(ptregscall_common)
15521
15522 ENTRY(stub_execve)
15523 CFI_STARTPROC
15524 @@ -720,7 +1019,7 @@ ENTRY(stub_execve)
15525 RESTORE_REST
15526 jmp int_ret_from_sys_call
15527 CFI_ENDPROC
15528 -END(stub_execve)
15529 +ENDPROC(stub_execve)
15530
15531 /*
15532 * sigreturn is special because it needs to restore all registers on return.
15533 @@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
15534 RESTORE_REST
15535 jmp int_ret_from_sys_call
15536 CFI_ENDPROC
15537 -END(stub_rt_sigreturn)
15538 +ENDPROC(stub_rt_sigreturn)
15539
15540 /*
15541 * Build the entry stubs and pointer table with some assembler magic.
15542 @@ -773,7 +1072,7 @@ vector=vector+1
15543 2: jmp common_interrupt
15544 .endr
15545 CFI_ENDPROC
15546 -END(irq_entries_start)
15547 +ENDPROC(irq_entries_start)
15548
15549 .previous
15550 END(interrupt)
15551 @@ -793,6 +1092,16 @@ END(interrupt)
15552 subq $ORIG_RAX-RBP, %rsp
15553 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
15554 SAVE_ARGS_IRQ
15555 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15556 + testb $3, CS(%rdi)
15557 + jnz 1f
15558 + pax_enter_kernel
15559 + jmp 2f
15560 +1: pax_enter_kernel_user
15561 +2:
15562 +#else
15563 + pax_enter_kernel
15564 +#endif
15565 call \func
15566 .endm
15567
15568 @@ -824,7 +1133,7 @@ ret_from_intr:
15569
15570 exit_intr:
15571 GET_THREAD_INFO(%rcx)
15572 - testl $3,CS-ARGOFFSET(%rsp)
15573 + testb $3,CS-ARGOFFSET(%rsp)
15574 je retint_kernel
15575
15576 /* Interrupt came from user space */
15577 @@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
15578 * The iretq could re-enable interrupts:
15579 */
15580 DISABLE_INTERRUPTS(CLBR_ANY)
15581 + pax_exit_kernel_user
15582 TRACE_IRQS_IRETQ
15583 SWAPGS
15584 jmp restore_args
15585
15586 retint_restore_args: /* return to kernel space */
15587 DISABLE_INTERRUPTS(CLBR_ANY)
15588 + pax_exit_kernel
15589 + pax_force_retaddr RIP-ARGOFFSET
15590 /*
15591 * The iretq could re-enable interrupts:
15592 */
15593 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
15594 #endif
15595
15596 CFI_ENDPROC
15597 -END(common_interrupt)
15598 +ENDPROC(common_interrupt)
15599 /*
15600 * End of kprobes section
15601 */
15602 @@ -956,7 +1268,7 @@ ENTRY(\sym)
15603 interrupt \do_sym
15604 jmp ret_from_intr
15605 CFI_ENDPROC
15606 -END(\sym)
15607 +ENDPROC(\sym)
15608 .endm
15609
15610 #ifdef CONFIG_SMP
15611 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
15612 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15613 call error_entry
15614 DEFAULT_FRAME 0
15615 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15616 + testb $3, CS(%rsp)
15617 + jnz 1f
15618 + pax_enter_kernel
15619 + jmp 2f
15620 +1: pax_enter_kernel_user
15621 +2:
15622 +#else
15623 + pax_enter_kernel
15624 +#endif
15625 movq %rsp,%rdi /* pt_regs pointer */
15626 xorl %esi,%esi /* no error code */
15627 call \do_sym
15628 jmp error_exit /* %ebx: no swapgs flag */
15629 CFI_ENDPROC
15630 -END(\sym)
15631 +ENDPROC(\sym)
15632 .endm
15633
15634 .macro paranoidzeroentry sym do_sym
15635 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
15636 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15637 call save_paranoid
15638 TRACE_IRQS_OFF
15639 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15640 + testb $3, CS(%rsp)
15641 + jnz 1f
15642 + pax_enter_kernel
15643 + jmp 2f
15644 +1: pax_enter_kernel_user
15645 +2:
15646 +#else
15647 + pax_enter_kernel
15648 +#endif
15649 movq %rsp,%rdi /* pt_regs pointer */
15650 xorl %esi,%esi /* no error code */
15651 call \do_sym
15652 jmp paranoid_exit /* %ebx: no swapgs flag */
15653 CFI_ENDPROC
15654 -END(\sym)
15655 +ENDPROC(\sym)
15656 .endm
15657
15658 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
15659 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
15660 .macro paranoidzeroentry_ist sym do_sym ist
15661 ENTRY(\sym)
15662 INTR_FRAME
15663 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
15664 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15665 call save_paranoid
15666 TRACE_IRQS_OFF
15667 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15668 + testb $3, CS(%rsp)
15669 + jnz 1f
15670 + pax_enter_kernel
15671 + jmp 2f
15672 +1: pax_enter_kernel_user
15673 +2:
15674 +#else
15675 + pax_enter_kernel
15676 +#endif
15677 movq %rsp,%rdi /* pt_regs pointer */
15678 xorl %esi,%esi /* no error code */
15679 +#ifdef CONFIG_SMP
15680 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
15681 + lea init_tss(%r12), %r12
15682 +#else
15683 + lea init_tss(%rip), %r12
15684 +#endif
15685 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15686 call \do_sym
15687 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15688 jmp paranoid_exit /* %ebx: no swapgs flag */
15689 CFI_ENDPROC
15690 -END(\sym)
15691 +ENDPROC(\sym)
15692 .endm
15693
15694 .macro errorentry sym do_sym
15695 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
15696 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15697 call error_entry
15698 DEFAULT_FRAME 0
15699 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15700 + testb $3, CS(%rsp)
15701 + jnz 1f
15702 + pax_enter_kernel
15703 + jmp 2f
15704 +1: pax_enter_kernel_user
15705 +2:
15706 +#else
15707 + pax_enter_kernel
15708 +#endif
15709 movq %rsp,%rdi /* pt_regs pointer */
15710 movq ORIG_RAX(%rsp),%rsi /* get error code */
15711 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15712 call \do_sym
15713 jmp error_exit /* %ebx: no swapgs flag */
15714 CFI_ENDPROC
15715 -END(\sym)
15716 +ENDPROC(\sym)
15717 .endm
15718
15719 /* error code is on the stack already */
15720 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
15721 call save_paranoid
15722 DEFAULT_FRAME 0
15723 TRACE_IRQS_OFF
15724 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15725 + testb $3, CS(%rsp)
15726 + jnz 1f
15727 + pax_enter_kernel
15728 + jmp 2f
15729 +1: pax_enter_kernel_user
15730 +2:
15731 +#else
15732 + pax_enter_kernel
15733 +#endif
15734 movq %rsp,%rdi /* pt_regs pointer */
15735 movq ORIG_RAX(%rsp),%rsi /* get error code */
15736 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15737 call \do_sym
15738 jmp paranoid_exit /* %ebx: no swapgs flag */
15739 CFI_ENDPROC
15740 -END(\sym)
15741 +ENDPROC(\sym)
15742 .endm
15743
15744 zeroentry divide_error do_divide_error
15745 @@ -1129,9 +1497,10 @@ gs_change:
15746 2: mfence /* workaround */
15747 SWAPGS
15748 popfq_cfi
15749 + pax_force_retaddr
15750 ret
15751 CFI_ENDPROC
15752 -END(native_load_gs_index)
15753 +ENDPROC(native_load_gs_index)
15754
15755 .section __ex_table,"a"
15756 .align 8
15757 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
15758 * Here we are in the child and the registers are set as they were
15759 * at kernel_thread() invocation in the parent.
15760 */
15761 + pax_force_fptr %rsi
15762 call *%rsi
15763 # exit
15764 mov %eax, %edi
15765 call do_exit
15766 ud2 # padding for call trace
15767 CFI_ENDPROC
15768 -END(kernel_thread_helper)
15769 +ENDPROC(kernel_thread_helper)
15770
15771 /*
15772 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
15773 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
15774 RESTORE_REST
15775 testq %rax,%rax
15776 je int_ret_from_sys_call
15777 - RESTORE_ARGS
15778 UNFAKE_STACK_FRAME
15779 + pax_force_retaddr
15780 ret
15781 CFI_ENDPROC
15782 -END(kernel_execve)
15783 +ENDPROC(kernel_execve)
15784
15785 /* Call softirq on interrupt stack. Interrupts are off. */
15786 ENTRY(call_softirq)
15787 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
15788 CFI_DEF_CFA_REGISTER rsp
15789 CFI_ADJUST_CFA_OFFSET -8
15790 decl PER_CPU_VAR(irq_count)
15791 + pax_force_retaddr
15792 ret
15793 CFI_ENDPROC
15794 -END(call_softirq)
15795 +ENDPROC(call_softirq)
15796
15797 #ifdef CONFIG_XEN
15798 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
15799 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
15800 decl PER_CPU_VAR(irq_count)
15801 jmp error_exit
15802 CFI_ENDPROC
15803 -END(xen_do_hypervisor_callback)
15804 +ENDPROC(xen_do_hypervisor_callback)
15805
15806 /*
15807 * Hypervisor uses this for application faults while it executes.
15808 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
15809 SAVE_ALL
15810 jmp error_exit
15811 CFI_ENDPROC
15812 -END(xen_failsafe_callback)
15813 +ENDPROC(xen_failsafe_callback)
15814
15815 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
15816 xen_hvm_callback_vector xen_evtchn_do_upcall
15817 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
15818 TRACE_IRQS_OFF
15819 testl %ebx,%ebx /* swapgs needed? */
15820 jnz paranoid_restore
15821 - testl $3,CS(%rsp)
15822 + testb $3,CS(%rsp)
15823 jnz paranoid_userspace
15824 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15825 + pax_exit_kernel
15826 + TRACE_IRQS_IRETQ 0
15827 + SWAPGS_UNSAFE_STACK
15828 + RESTORE_ALL 8
15829 + pax_force_retaddr_bts
15830 + jmp irq_return
15831 +#endif
15832 paranoid_swapgs:
15833 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15834 + pax_exit_kernel_user
15835 +#else
15836 + pax_exit_kernel
15837 +#endif
15838 TRACE_IRQS_IRETQ 0
15839 SWAPGS_UNSAFE_STACK
15840 RESTORE_ALL 8
15841 jmp irq_return
15842 paranoid_restore:
15843 + pax_exit_kernel
15844 TRACE_IRQS_IRETQ 0
15845 RESTORE_ALL 8
15846 + pax_force_retaddr_bts
15847 jmp irq_return
15848 paranoid_userspace:
15849 GET_THREAD_INFO(%rcx)
15850 @@ -1394,7 +1780,7 @@ paranoid_schedule:
15851 TRACE_IRQS_OFF
15852 jmp paranoid_userspace
15853 CFI_ENDPROC
15854 -END(paranoid_exit)
15855 +ENDPROC(paranoid_exit)
15856
15857 /*
15858 * Exception entry point. This expects an error code/orig_rax on the stack.
15859 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
15860 movq_cfi r14, R14+8
15861 movq_cfi r15, R15+8
15862 xorl %ebx,%ebx
15863 - testl $3,CS+8(%rsp)
15864 + testb $3,CS+8(%rsp)
15865 je error_kernelspace
15866 error_swapgs:
15867 SWAPGS
15868 error_sti:
15869 TRACE_IRQS_OFF
15870 + pax_force_retaddr_bts
15871 ret
15872
15873 /*
15874 @@ -1453,7 +1840,7 @@ bstep_iret:
15875 movq %rcx,RIP+8(%rsp)
15876 jmp error_swapgs
15877 CFI_ENDPROC
15878 -END(error_entry)
15879 +ENDPROC(error_entry)
15880
15881
15882 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
15883 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
15884 jnz retint_careful
15885 jmp retint_swapgs
15886 CFI_ENDPROC
15887 -END(error_exit)
15888 +ENDPROC(error_exit)
15889
15890
15891 /* runs on exception stack */
15892 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
15893 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15894 call save_paranoid
15895 DEFAULT_FRAME 0
15896 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15897 + testb $3, CS(%rsp)
15898 + jnz 1f
15899 + pax_enter_kernel
15900 + jmp 2f
15901 +1: pax_enter_kernel_user
15902 +2:
15903 +#else
15904 + pax_enter_kernel
15905 +#endif
15906 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
15907 movq %rsp,%rdi
15908 movq $-1,%rsi
15909 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
15910 DISABLE_INTERRUPTS(CLBR_NONE)
15911 testl %ebx,%ebx /* swapgs needed? */
15912 jnz nmi_restore
15913 - testl $3,CS(%rsp)
15914 + testb $3,CS(%rsp)
15915 jnz nmi_userspace
15916 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15917 + pax_exit_kernel
15918 + SWAPGS_UNSAFE_STACK
15919 + RESTORE_ALL 8
15920 + pax_force_retaddr_bts
15921 + jmp irq_return
15922 +#endif
15923 nmi_swapgs:
15924 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15925 + pax_exit_kernel_user
15926 +#else
15927 + pax_exit_kernel
15928 +#endif
15929 SWAPGS_UNSAFE_STACK
15930 + RESTORE_ALL 8
15931 + jmp irq_return
15932 nmi_restore:
15933 + pax_exit_kernel
15934 RESTORE_ALL 8
15935 + pax_force_retaddr_bts
15936 jmp irq_return
15937 nmi_userspace:
15938 GET_THREAD_INFO(%rcx)
15939 @@ -1529,14 +1942,14 @@ nmi_schedule:
15940 jmp paranoid_exit
15941 CFI_ENDPROC
15942 #endif
15943 -END(nmi)
15944 +ENDPROC(nmi)
15945
15946 ENTRY(ignore_sysret)
15947 CFI_STARTPROC
15948 mov $-ENOSYS,%eax
15949 sysret
15950 CFI_ENDPROC
15951 -END(ignore_sysret)
15952 +ENDPROC(ignore_sysret)
15953
15954 /*
15955 * End of kprobes section
15956 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
15957 index c9a281f..ce2f317 100644
15958 --- a/arch/x86/kernel/ftrace.c
15959 +++ b/arch/x86/kernel/ftrace.c
15960 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
15961 static const void *mod_code_newcode; /* holds the text to write to the IP */
15962
15963 static unsigned nmi_wait_count;
15964 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
15965 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
15966
15967 int ftrace_arch_read_dyn_info(char *buf, int size)
15968 {
15969 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
15970
15971 r = snprintf(buf, size, "%u %u",
15972 nmi_wait_count,
15973 - atomic_read(&nmi_update_count));
15974 + atomic_read_unchecked(&nmi_update_count));
15975 return r;
15976 }
15977
15978 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
15979
15980 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
15981 smp_rmb();
15982 + pax_open_kernel();
15983 ftrace_mod_code();
15984 - atomic_inc(&nmi_update_count);
15985 + pax_close_kernel();
15986 + atomic_inc_unchecked(&nmi_update_count);
15987 }
15988 /* Must have previous changes seen before executions */
15989 smp_mb();
15990 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
15991 {
15992 unsigned char replaced[MCOUNT_INSN_SIZE];
15993
15994 + ip = ktla_ktva(ip);
15995 +
15996 /*
15997 * Note: Due to modules and __init, code can
15998 * disappear and change, we need to protect against faulting
15999 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16000 unsigned char old[MCOUNT_INSN_SIZE], *new;
16001 int ret;
16002
16003 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16004 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16005 new = ftrace_call_replace(ip, (unsigned long)func);
16006 ret = ftrace_modify_code(ip, old, new);
16007
16008 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16009 {
16010 unsigned char code[MCOUNT_INSN_SIZE];
16011
16012 + ip = ktla_ktva(ip);
16013 +
16014 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16015 return -EFAULT;
16016
16017 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16018 index 3bb0850..55a56f4 100644
16019 --- a/arch/x86/kernel/head32.c
16020 +++ b/arch/x86/kernel/head32.c
16021 @@ -19,6 +19,7 @@
16022 #include <asm/io_apic.h>
16023 #include <asm/bios_ebda.h>
16024 #include <asm/tlbflush.h>
16025 +#include <asm/boot.h>
16026
16027 static void __init i386_default_early_setup(void)
16028 {
16029 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
16030 {
16031 memblock_init();
16032
16033 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16034 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16035
16036 #ifdef CONFIG_BLK_DEV_INITRD
16037 /* Reserve INITRD */
16038 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16039 index ce0be7c..c41476e 100644
16040 --- a/arch/x86/kernel/head_32.S
16041 +++ b/arch/x86/kernel/head_32.S
16042 @@ -25,6 +25,12 @@
16043 /* Physical address */
16044 #define pa(X) ((X) - __PAGE_OFFSET)
16045
16046 +#ifdef CONFIG_PAX_KERNEXEC
16047 +#define ta(X) (X)
16048 +#else
16049 +#define ta(X) ((X) - __PAGE_OFFSET)
16050 +#endif
16051 +
16052 /*
16053 * References to members of the new_cpu_data structure.
16054 */
16055 @@ -54,11 +60,7 @@
16056 * and small than max_low_pfn, otherwise will waste some page table entries
16057 */
16058
16059 -#if PTRS_PER_PMD > 1
16060 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16061 -#else
16062 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16063 -#endif
16064 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16065
16066 /* Number of possible pages in the lowmem region */
16067 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16068 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16069 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16070
16071 /*
16072 + * Real beginning of normal "text" segment
16073 + */
16074 +ENTRY(stext)
16075 +ENTRY(_stext)
16076 +
16077 +/*
16078 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16079 * %esi points to the real-mode code as a 32-bit pointer.
16080 * CS and DS must be 4 GB flat segments, but we don't depend on
16081 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16082 * can.
16083 */
16084 __HEAD
16085 +
16086 +#ifdef CONFIG_PAX_KERNEXEC
16087 + jmp startup_32
16088 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16089 +.fill PAGE_SIZE-5,1,0xcc
16090 +#endif
16091 +
16092 ENTRY(startup_32)
16093 movl pa(stack_start),%ecx
16094
16095 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16096 2:
16097 leal -__PAGE_OFFSET(%ecx),%esp
16098
16099 +#ifdef CONFIG_SMP
16100 + movl $pa(cpu_gdt_table),%edi
16101 + movl $__per_cpu_load,%eax
16102 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16103 + rorl $16,%eax
16104 + movb %al,__KERNEL_PERCPU + 4(%edi)
16105 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16106 + movl $__per_cpu_end - 1,%eax
16107 + subl $__per_cpu_start,%eax
16108 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16109 +#endif
16110 +
16111 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16112 + movl $NR_CPUS,%ecx
16113 + movl $pa(cpu_gdt_table),%edi
16114 +1:
16115 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16116 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16117 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16118 + addl $PAGE_SIZE_asm,%edi
16119 + loop 1b
16120 +#endif
16121 +
16122 +#ifdef CONFIG_PAX_KERNEXEC
16123 + movl $pa(boot_gdt),%edi
16124 + movl $__LOAD_PHYSICAL_ADDR,%eax
16125 + movw %ax,__BOOT_CS + 2(%edi)
16126 + rorl $16,%eax
16127 + movb %al,__BOOT_CS + 4(%edi)
16128 + movb %ah,__BOOT_CS + 7(%edi)
16129 + rorl $16,%eax
16130 +
16131 + ljmp $(__BOOT_CS),$1f
16132 +1:
16133 +
16134 + movl $NR_CPUS,%ecx
16135 + movl $pa(cpu_gdt_table),%edi
16136 + addl $__PAGE_OFFSET,%eax
16137 +1:
16138 + movw %ax,__KERNEL_CS + 2(%edi)
16139 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16140 + rorl $16,%eax
16141 + movb %al,__KERNEL_CS + 4(%edi)
16142 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16143 + movb %ah,__KERNEL_CS + 7(%edi)
16144 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16145 + rorl $16,%eax
16146 + addl $PAGE_SIZE_asm,%edi
16147 + loop 1b
16148 +#endif
16149 +
16150 /*
16151 * Clear BSS first so that there are no surprises...
16152 */
16153 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16154 movl %eax, pa(max_pfn_mapped)
16155
16156 /* Do early initialization of the fixmap area */
16157 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16158 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16159 +#ifdef CONFIG_COMPAT_VDSO
16160 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16161 +#else
16162 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16163 +#endif
16164 #else /* Not PAE */
16165
16166 page_pde_offset = (__PAGE_OFFSET >> 20);
16167 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16168 movl %eax, pa(max_pfn_mapped)
16169
16170 /* Do early initialization of the fixmap area */
16171 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16172 - movl %eax,pa(initial_page_table+0xffc)
16173 +#ifdef CONFIG_COMPAT_VDSO
16174 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16175 +#else
16176 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16177 +#endif
16178 #endif
16179
16180 #ifdef CONFIG_PARAVIRT
16181 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16182 cmpl $num_subarch_entries, %eax
16183 jae bad_subarch
16184
16185 - movl pa(subarch_entries)(,%eax,4), %eax
16186 - subl $__PAGE_OFFSET, %eax
16187 - jmp *%eax
16188 + jmp *pa(subarch_entries)(,%eax,4)
16189
16190 bad_subarch:
16191 WEAK(lguest_entry)
16192 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16193 __INITDATA
16194
16195 subarch_entries:
16196 - .long default_entry /* normal x86/PC */
16197 - .long lguest_entry /* lguest hypervisor */
16198 - .long xen_entry /* Xen hypervisor */
16199 - .long default_entry /* Moorestown MID */
16200 + .long ta(default_entry) /* normal x86/PC */
16201 + .long ta(lguest_entry) /* lguest hypervisor */
16202 + .long ta(xen_entry) /* Xen hypervisor */
16203 + .long ta(default_entry) /* Moorestown MID */
16204 num_subarch_entries = (. - subarch_entries) / 4
16205 .previous
16206 #else
16207 @@ -312,6 +382,7 @@ default_entry:
16208 orl %edx,%eax
16209 movl %eax,%cr4
16210
16211 +#ifdef CONFIG_X86_PAE
16212 testb $X86_CR4_PAE, %al # check if PAE is enabled
16213 jz 6f
16214
16215 @@ -340,6 +411,9 @@ default_entry:
16216 /* Make changes effective */
16217 wrmsr
16218
16219 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16220 +#endif
16221 +
16222 6:
16223
16224 /*
16225 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16226 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16227 movl %eax,%ss # after changing gdt.
16228
16229 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16230 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16231 movl %eax,%ds
16232 movl %eax,%es
16233
16234 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16235 */
16236 cmpb $0,ready
16237 jne 1f
16238 - movl $gdt_page,%eax
16239 + movl $cpu_gdt_table,%eax
16240 movl $stack_canary,%ecx
16241 +#ifdef CONFIG_SMP
16242 + addl $__per_cpu_load,%ecx
16243 +#endif
16244 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16245 shrl $16, %ecx
16246 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16247 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16248 1:
16249 -#endif
16250 movl $(__KERNEL_STACK_CANARY),%eax
16251 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16252 + movl $(__USER_DS),%eax
16253 +#else
16254 + xorl %eax,%eax
16255 +#endif
16256 movl %eax,%gs
16257
16258 xorl %eax,%eax # Clear LDT
16259 @@ -558,22 +639,22 @@ early_page_fault:
16260 jmp early_fault
16261
16262 early_fault:
16263 - cld
16264 #ifdef CONFIG_PRINTK
16265 + cmpl $1,%ss:early_recursion_flag
16266 + je hlt_loop
16267 + incl %ss:early_recursion_flag
16268 + cld
16269 pusha
16270 movl $(__KERNEL_DS),%eax
16271 movl %eax,%ds
16272 movl %eax,%es
16273 - cmpl $2,early_recursion_flag
16274 - je hlt_loop
16275 - incl early_recursion_flag
16276 movl %cr2,%eax
16277 pushl %eax
16278 pushl %edx /* trapno */
16279 pushl $fault_msg
16280 call printk
16281 +; call dump_stack
16282 #endif
16283 - call dump_stack
16284 hlt_loop:
16285 hlt
16286 jmp hlt_loop
16287 @@ -581,8 +662,11 @@ hlt_loop:
16288 /* This is the default interrupt "handler" :-) */
16289 ALIGN
16290 ignore_int:
16291 - cld
16292 #ifdef CONFIG_PRINTK
16293 + cmpl $2,%ss:early_recursion_flag
16294 + je hlt_loop
16295 + incl %ss:early_recursion_flag
16296 + cld
16297 pushl %eax
16298 pushl %ecx
16299 pushl %edx
16300 @@ -591,9 +675,6 @@ ignore_int:
16301 movl $(__KERNEL_DS),%eax
16302 movl %eax,%ds
16303 movl %eax,%es
16304 - cmpl $2,early_recursion_flag
16305 - je hlt_loop
16306 - incl early_recursion_flag
16307 pushl 16(%esp)
16308 pushl 24(%esp)
16309 pushl 32(%esp)
16310 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16311 /*
16312 * BSS section
16313 */
16314 -__PAGE_ALIGNED_BSS
16315 - .align PAGE_SIZE
16316 #ifdef CONFIG_X86_PAE
16317 +.section .initial_pg_pmd,"a",@progbits
16318 initial_pg_pmd:
16319 .fill 1024*KPMDS,4,0
16320 #else
16321 +.section .initial_page_table,"a",@progbits
16322 ENTRY(initial_page_table)
16323 .fill 1024,4,0
16324 #endif
16325 +.section .initial_pg_fixmap,"a",@progbits
16326 initial_pg_fixmap:
16327 .fill 1024,4,0
16328 +.section .empty_zero_page,"a",@progbits
16329 ENTRY(empty_zero_page)
16330 .fill 4096,1,0
16331 +.section .swapper_pg_dir,"a",@progbits
16332 ENTRY(swapper_pg_dir)
16333 +#ifdef CONFIG_X86_PAE
16334 + .fill 4,8,0
16335 +#else
16336 .fill 1024,4,0
16337 +#endif
16338 +
16339 +/*
16340 + * The IDT has to be page-aligned to simplify the Pentium
16341 + * F0 0F bug workaround.. We have a special link segment
16342 + * for this.
16343 + */
16344 +.section .idt,"a",@progbits
16345 +ENTRY(idt_table)
16346 + .fill 256,8,0
16347
16348 /*
16349 * This starts the data section.
16350 */
16351 #ifdef CONFIG_X86_PAE
16352 -__PAGE_ALIGNED_DATA
16353 - /* Page-aligned for the benefit of paravirt? */
16354 - .align PAGE_SIZE
16355 +.section .initial_page_table,"a",@progbits
16356 ENTRY(initial_page_table)
16357 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
16358 # if KPMDS == 3
16359 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
16360 # error "Kernel PMDs should be 1, 2 or 3"
16361 # endif
16362 .align PAGE_SIZE /* needs to be page-sized too */
16363 +
16364 +#ifdef CONFIG_PAX_PER_CPU_PGD
16365 +ENTRY(cpu_pgd)
16366 + .rept NR_CPUS
16367 + .fill 4,8,0
16368 + .endr
16369 +#endif
16370 +
16371 #endif
16372
16373 .data
16374 .balign 4
16375 ENTRY(stack_start)
16376 - .long init_thread_union+THREAD_SIZE
16377 + .long init_thread_union+THREAD_SIZE-8
16378
16379 +ready: .byte 0
16380 +
16381 +.section .rodata,"a",@progbits
16382 early_recursion_flag:
16383 .long 0
16384
16385 -ready: .byte 0
16386 -
16387 int_msg:
16388 .asciz "Unknown interrupt or fault at: %p %p %p\n"
16389
16390 @@ -707,7 +811,7 @@ fault_msg:
16391 .word 0 # 32 bit align gdt_desc.address
16392 boot_gdt_descr:
16393 .word __BOOT_DS+7
16394 - .long boot_gdt - __PAGE_OFFSET
16395 + .long pa(boot_gdt)
16396
16397 .word 0 # 32-bit align idt_desc.address
16398 idt_descr:
16399 @@ -718,7 +822,7 @@ idt_descr:
16400 .word 0 # 32 bit align gdt_desc.address
16401 ENTRY(early_gdt_descr)
16402 .word GDT_ENTRIES*8-1
16403 - .long gdt_page /* Overwritten for secondary CPUs */
16404 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
16405
16406 /*
16407 * The boot_gdt must mirror the equivalent in setup.S and is
16408 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
16409 .align L1_CACHE_BYTES
16410 ENTRY(boot_gdt)
16411 .fill GDT_ENTRY_BOOT_CS,8,0
16412 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
16413 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
16414 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
16415 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
16416 +
16417 + .align PAGE_SIZE_asm
16418 +ENTRY(cpu_gdt_table)
16419 + .rept NR_CPUS
16420 + .quad 0x0000000000000000 /* NULL descriptor */
16421 + .quad 0x0000000000000000 /* 0x0b reserved */
16422 + .quad 0x0000000000000000 /* 0x13 reserved */
16423 + .quad 0x0000000000000000 /* 0x1b reserved */
16424 +
16425 +#ifdef CONFIG_PAX_KERNEXEC
16426 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
16427 +#else
16428 + .quad 0x0000000000000000 /* 0x20 unused */
16429 +#endif
16430 +
16431 + .quad 0x0000000000000000 /* 0x28 unused */
16432 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
16433 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
16434 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
16435 + .quad 0x0000000000000000 /* 0x4b reserved */
16436 + .quad 0x0000000000000000 /* 0x53 reserved */
16437 + .quad 0x0000000000000000 /* 0x5b reserved */
16438 +
16439 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
16440 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
16441 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
16442 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
16443 +
16444 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
16445 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
16446 +
16447 + /*
16448 + * Segments used for calling PnP BIOS have byte granularity.
16449 + * The code segments and data segments have fixed 64k limits,
16450 + * the transfer segment sizes are set at run time.
16451 + */
16452 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
16453 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
16454 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
16455 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
16456 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
16457 +
16458 + /*
16459 + * The APM segments have byte granularity and their bases
16460 + * are set at run time. All have 64k limits.
16461 + */
16462 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
16463 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
16464 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
16465 +
16466 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
16467 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
16468 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
16469 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
16470 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
16471 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
16472 +
16473 + /* Be sure this is zeroed to avoid false validations in Xen */
16474 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
16475 + .endr
16476 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
16477 index e11e394..9aebc5d 100644
16478 --- a/arch/x86/kernel/head_64.S
16479 +++ b/arch/x86/kernel/head_64.S
16480 @@ -19,6 +19,8 @@
16481 #include <asm/cache.h>
16482 #include <asm/processor-flags.h>
16483 #include <asm/percpu.h>
16484 +#include <asm/cpufeature.h>
16485 +#include <asm/alternative-asm.h>
16486
16487 #ifdef CONFIG_PARAVIRT
16488 #include <asm/asm-offsets.h>
16489 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
16490 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
16491 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
16492 L3_START_KERNEL = pud_index(__START_KERNEL_map)
16493 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
16494 +L3_VMALLOC_START = pud_index(VMALLOC_START)
16495 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
16496 +L3_VMALLOC_END = pud_index(VMALLOC_END)
16497 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
16498 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
16499
16500 .text
16501 __HEAD
16502 @@ -85,35 +93,23 @@ startup_64:
16503 */
16504 addq %rbp, init_level4_pgt + 0(%rip)
16505 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
16506 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
16507 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
16508 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
16509 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
16510
16511 addq %rbp, level3_ident_pgt + 0(%rip)
16512 +#ifndef CONFIG_XEN
16513 + addq %rbp, level3_ident_pgt + 8(%rip)
16514 +#endif
16515
16516 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
16517 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
16518 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
16519 +
16520 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
16521 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
16522
16523 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
16524 -
16525 - /* Add an Identity mapping if I am above 1G */
16526 - leaq _text(%rip), %rdi
16527 - andq $PMD_PAGE_MASK, %rdi
16528 -
16529 - movq %rdi, %rax
16530 - shrq $PUD_SHIFT, %rax
16531 - andq $(PTRS_PER_PUD - 1), %rax
16532 - jz ident_complete
16533 -
16534 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
16535 - leaq level3_ident_pgt(%rip), %rbx
16536 - movq %rdx, 0(%rbx, %rax, 8)
16537 -
16538 - movq %rdi, %rax
16539 - shrq $PMD_SHIFT, %rax
16540 - andq $(PTRS_PER_PMD - 1), %rax
16541 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
16542 - leaq level2_spare_pgt(%rip), %rbx
16543 - movq %rdx, 0(%rbx, %rax, 8)
16544 -ident_complete:
16545 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
16546
16547 /*
16548 * Fixup the kernel text+data virtual addresses. Note that
16549 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
16550 * after the boot processor executes this code.
16551 */
16552
16553 - /* Enable PAE mode and PGE */
16554 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
16555 + /* Enable PAE mode and PSE/PGE */
16556 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16557 movq %rax, %cr4
16558
16559 /* Setup early boot stage 4 level pagetables. */
16560 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
16561 movl $MSR_EFER, %ecx
16562 rdmsr
16563 btsl $_EFER_SCE, %eax /* Enable System Call */
16564 - btl $20,%edi /* No Execute supported? */
16565 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
16566 jnc 1f
16567 btsl $_EFER_NX, %eax
16568 + leaq init_level4_pgt(%rip), %rdi
16569 +#ifndef CONFIG_EFI
16570 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
16571 +#endif
16572 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
16573 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
16574 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
16575 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
16576 1: wrmsr /* Make changes effective */
16577
16578 /* Setup cr0 */
16579 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
16580 * jump. In addition we need to ensure %cs is set so we make this
16581 * a far return.
16582 */
16583 + pax_set_fptr_mask
16584 movq initial_code(%rip),%rax
16585 pushq $0 # fake return address to stop unwinder
16586 pushq $__KERNEL_CS # set correct cs
16587 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
16588 bad_address:
16589 jmp bad_address
16590
16591 - .section ".init.text","ax"
16592 + __INIT
16593 #ifdef CONFIG_EARLY_PRINTK
16594 .globl early_idt_handlers
16595 early_idt_handlers:
16596 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
16597 #endif /* EARLY_PRINTK */
16598 1: hlt
16599 jmp 1b
16600 + .previous
16601
16602 #ifdef CONFIG_EARLY_PRINTK
16603 + __INITDATA
16604 early_recursion_flag:
16605 .long 0
16606 + .previous
16607
16608 + .section .rodata,"a",@progbits
16609 early_idt_msg:
16610 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
16611 early_idt_ripmsg:
16612 .asciz "RIP %s\n"
16613 + .previous
16614 #endif /* CONFIG_EARLY_PRINTK */
16615 - .previous
16616
16617 + .section .rodata,"a",@progbits
16618 #define NEXT_PAGE(name) \
16619 .balign PAGE_SIZE; \
16620 ENTRY(name)
16621 @@ -338,7 +348,6 @@ ENTRY(name)
16622 i = i + 1 ; \
16623 .endr
16624
16625 - .data
16626 /*
16627 * This default setting generates an ident mapping at address 0x100000
16628 * and a mapping for the kernel that precisely maps virtual address
16629 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
16630 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16631 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
16632 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16633 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
16634 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
16635 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
16636 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
16637 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
16638 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16639 .org init_level4_pgt + L4_START_KERNEL*8, 0
16640 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
16641 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
16642
16643 +#ifdef CONFIG_PAX_PER_CPU_PGD
16644 +NEXT_PAGE(cpu_pgd)
16645 + .rept NR_CPUS
16646 + .fill 512,8,0
16647 + .endr
16648 +#endif
16649 +
16650 NEXT_PAGE(level3_ident_pgt)
16651 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16652 +#ifdef CONFIG_XEN
16653 .fill 511,8,0
16654 +#else
16655 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
16656 + .fill 510,8,0
16657 +#endif
16658 +
16659 +NEXT_PAGE(level3_vmalloc_start_pgt)
16660 + .fill 512,8,0
16661 +
16662 +NEXT_PAGE(level3_vmalloc_end_pgt)
16663 + .fill 512,8,0
16664 +
16665 +NEXT_PAGE(level3_vmemmap_pgt)
16666 + .fill L3_VMEMMAP_START,8,0
16667 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16668
16669 NEXT_PAGE(level3_kernel_pgt)
16670 .fill L3_START_KERNEL,8,0
16671 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
16672 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
16673 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16674
16675 +NEXT_PAGE(level2_vmemmap_pgt)
16676 + .fill 512,8,0
16677 +
16678 NEXT_PAGE(level2_fixmap_pgt)
16679 - .fill 506,8,0
16680 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16681 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
16682 - .fill 5,8,0
16683 + .fill 507,8,0
16684 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
16685 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
16686 + .fill 4,8,0
16687
16688 -NEXT_PAGE(level1_fixmap_pgt)
16689 +NEXT_PAGE(level1_vsyscall_pgt)
16690 .fill 512,8,0
16691
16692 -NEXT_PAGE(level2_ident_pgt)
16693 - /* Since I easily can, map the first 1G.
16694 + /* Since I easily can, map the first 2G.
16695 * Don't set NX because code runs from these pages.
16696 */
16697 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
16698 +NEXT_PAGE(level2_ident_pgt)
16699 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
16700
16701 NEXT_PAGE(level2_kernel_pgt)
16702 /*
16703 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
16704 * If you want to increase this then increase MODULES_VADDR
16705 * too.)
16706 */
16707 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
16708 - KERNEL_IMAGE_SIZE/PMD_SIZE)
16709 -
16710 -NEXT_PAGE(level2_spare_pgt)
16711 - .fill 512, 8, 0
16712 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
16713
16714 #undef PMDS
16715 #undef NEXT_PAGE
16716
16717 - .data
16718 + .align PAGE_SIZE
16719 +ENTRY(cpu_gdt_table)
16720 + .rept NR_CPUS
16721 + .quad 0x0000000000000000 /* NULL descriptor */
16722 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
16723 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
16724 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
16725 + .quad 0x00cffb000000ffff /* __USER32_CS */
16726 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
16727 + .quad 0x00affb000000ffff /* __USER_CS */
16728 +
16729 +#ifdef CONFIG_PAX_KERNEXEC
16730 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
16731 +#else
16732 + .quad 0x0 /* unused */
16733 +#endif
16734 +
16735 + .quad 0,0 /* TSS */
16736 + .quad 0,0 /* LDT */
16737 + .quad 0,0,0 /* three TLS descriptors */
16738 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
16739 + /* asm/segment.h:GDT_ENTRIES must match this */
16740 +
16741 + /* zero the remaining page */
16742 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
16743 + .endr
16744 +
16745 .align 16
16746 .globl early_gdt_descr
16747 early_gdt_descr:
16748 .word GDT_ENTRIES*8-1
16749 early_gdt_descr_base:
16750 - .quad INIT_PER_CPU_VAR(gdt_page)
16751 + .quad cpu_gdt_table
16752
16753 ENTRY(phys_base)
16754 /* This must match the first entry in level2_kernel_pgt */
16755 .quad 0x0000000000000000
16756
16757 #include "../../x86/xen/xen-head.S"
16758 -
16759 - .section .bss, "aw", @nobits
16760 +
16761 + .section .rodata,"a",@progbits
16762 .align L1_CACHE_BYTES
16763 ENTRY(idt_table)
16764 - .skip IDT_ENTRIES * 16
16765 + .fill 512,8,0
16766
16767 __PAGE_ALIGNED_BSS
16768 .align PAGE_SIZE
16769 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
16770 index 9c3bd4a..e1d9b35 100644
16771 --- a/arch/x86/kernel/i386_ksyms_32.c
16772 +++ b/arch/x86/kernel/i386_ksyms_32.c
16773 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
16774 EXPORT_SYMBOL(cmpxchg8b_emu);
16775 #endif
16776
16777 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
16778 +
16779 /* Networking helper routines. */
16780 EXPORT_SYMBOL(csum_partial_copy_generic);
16781 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
16782 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
16783
16784 EXPORT_SYMBOL(__get_user_1);
16785 EXPORT_SYMBOL(__get_user_2);
16786 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
16787
16788 EXPORT_SYMBOL(csum_partial);
16789 EXPORT_SYMBOL(empty_zero_page);
16790 +
16791 +#ifdef CONFIG_PAX_KERNEXEC
16792 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
16793 +#endif
16794 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
16795 index 6104852..6114160 100644
16796 --- a/arch/x86/kernel/i8259.c
16797 +++ b/arch/x86/kernel/i8259.c
16798 @@ -210,7 +210,7 @@ spurious_8259A_irq:
16799 "spurious 8259A interrupt: IRQ%d.\n", irq);
16800 spurious_irq_mask |= irqmask;
16801 }
16802 - atomic_inc(&irq_err_count);
16803 + atomic_inc_unchecked(&irq_err_count);
16804 /*
16805 * Theoretically we do not have to handle this IRQ,
16806 * but in Linux this does not cause problems and is
16807 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
16808 index 43e9ccf..44ccf6f 100644
16809 --- a/arch/x86/kernel/init_task.c
16810 +++ b/arch/x86/kernel/init_task.c
16811 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
16812 * way process stacks are handled. This is done by having a special
16813 * "init_task" linker map entry..
16814 */
16815 -union thread_union init_thread_union __init_task_data =
16816 - { INIT_THREAD_INFO(init_task) };
16817 +union thread_union init_thread_union __init_task_data;
16818
16819 /*
16820 * Initial task structure.
16821 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
16822 * section. Since TSS's are completely CPU-local, we want them
16823 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
16824 */
16825 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
16826 -
16827 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
16828 +EXPORT_SYMBOL(init_tss);
16829 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
16830 index 8c96897..be66bfa 100644
16831 --- a/arch/x86/kernel/ioport.c
16832 +++ b/arch/x86/kernel/ioport.c
16833 @@ -6,6 +6,7 @@
16834 #include <linux/sched.h>
16835 #include <linux/kernel.h>
16836 #include <linux/capability.h>
16837 +#include <linux/security.h>
16838 #include <linux/errno.h>
16839 #include <linux/types.h>
16840 #include <linux/ioport.h>
16841 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16842
16843 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
16844 return -EINVAL;
16845 +#ifdef CONFIG_GRKERNSEC_IO
16846 + if (turn_on && grsec_disable_privio) {
16847 + gr_handle_ioperm();
16848 + return -EPERM;
16849 + }
16850 +#endif
16851 if (turn_on && !capable(CAP_SYS_RAWIO))
16852 return -EPERM;
16853
16854 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16855 * because the ->io_bitmap_max value must match the bitmap
16856 * contents:
16857 */
16858 - tss = &per_cpu(init_tss, get_cpu());
16859 + tss = init_tss + get_cpu();
16860
16861 if (turn_on)
16862 bitmap_clear(t->io_bitmap_ptr, from, num);
16863 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
16864 return -EINVAL;
16865 /* Trying to gain more privileges? */
16866 if (level > old) {
16867 +#ifdef CONFIG_GRKERNSEC_IO
16868 + if (grsec_disable_privio) {
16869 + gr_handle_iopl();
16870 + return -EPERM;
16871 + }
16872 +#endif
16873 if (!capable(CAP_SYS_RAWIO))
16874 return -EPERM;
16875 }
16876 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
16877 index 429e0c9..17b3ece 100644
16878 --- a/arch/x86/kernel/irq.c
16879 +++ b/arch/x86/kernel/irq.c
16880 @@ -18,7 +18,7 @@
16881 #include <asm/mce.h>
16882 #include <asm/hw_irq.h>
16883
16884 -atomic_t irq_err_count;
16885 +atomic_unchecked_t irq_err_count;
16886
16887 /* Function pointer for generic interrupt vector handling */
16888 void (*x86_platform_ipi_callback)(void) = NULL;
16889 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
16890 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
16891 seq_printf(p, " Machine check polls\n");
16892 #endif
16893 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
16894 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
16895 #if defined(CONFIG_X86_IO_APIC)
16896 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
16897 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
16898 #endif
16899 return 0;
16900 }
16901 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
16902
16903 u64 arch_irq_stat(void)
16904 {
16905 - u64 sum = atomic_read(&irq_err_count);
16906 + u64 sum = atomic_read_unchecked(&irq_err_count);
16907
16908 #ifdef CONFIG_X86_IO_APIC
16909 - sum += atomic_read(&irq_mis_count);
16910 + sum += atomic_read_unchecked(&irq_mis_count);
16911 #endif
16912 return sum;
16913 }
16914 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
16915 index 7209070..cbcd71a 100644
16916 --- a/arch/x86/kernel/irq_32.c
16917 +++ b/arch/x86/kernel/irq_32.c
16918 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
16919 __asm__ __volatile__("andl %%esp,%0" :
16920 "=r" (sp) : "0" (THREAD_SIZE - 1));
16921
16922 - return sp < (sizeof(struct thread_info) + STACK_WARN);
16923 + return sp < STACK_WARN;
16924 }
16925
16926 static void print_stack_overflow(void)
16927 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
16928 * per-CPU IRQ handling contexts (thread information and stack)
16929 */
16930 union irq_ctx {
16931 - struct thread_info tinfo;
16932 - u32 stack[THREAD_SIZE/sizeof(u32)];
16933 + unsigned long previous_esp;
16934 + u32 stack[THREAD_SIZE/sizeof(u32)];
16935 } __attribute__((aligned(THREAD_SIZE)));
16936
16937 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
16938 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
16939 static inline int
16940 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16941 {
16942 - union irq_ctx *curctx, *irqctx;
16943 + union irq_ctx *irqctx;
16944 u32 *isp, arg1, arg2;
16945
16946 - curctx = (union irq_ctx *) current_thread_info();
16947 irqctx = __this_cpu_read(hardirq_ctx);
16948
16949 /*
16950 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16951 * handler) we can't do that and just have to keep using the
16952 * current stack (which is the irq stack already after all)
16953 */
16954 - if (unlikely(curctx == irqctx))
16955 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
16956 return 0;
16957
16958 /* build the stack frame on the IRQ stack */
16959 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
16960 - irqctx->tinfo.task = curctx->tinfo.task;
16961 - irqctx->tinfo.previous_esp = current_stack_pointer;
16962 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
16963 + irqctx->previous_esp = current_stack_pointer;
16964
16965 - /*
16966 - * Copy the softirq bits in preempt_count so that the
16967 - * softirq checks work in the hardirq context.
16968 - */
16969 - irqctx->tinfo.preempt_count =
16970 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
16971 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
16972 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16973 + __set_fs(MAKE_MM_SEG(0));
16974 +#endif
16975
16976 if (unlikely(overflow))
16977 call_on_stack(print_stack_overflow, isp);
16978 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16979 : "0" (irq), "1" (desc), "2" (isp),
16980 "D" (desc->handle_irq)
16981 : "memory", "cc", "ecx");
16982 +
16983 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16984 + __set_fs(current_thread_info()->addr_limit);
16985 +#endif
16986 +
16987 return 1;
16988 }
16989
16990 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16991 */
16992 void __cpuinit irq_ctx_init(int cpu)
16993 {
16994 - union irq_ctx *irqctx;
16995 -
16996 if (per_cpu(hardirq_ctx, cpu))
16997 return;
16998
16999 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17000 - THREAD_FLAGS,
17001 - THREAD_ORDER));
17002 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17003 - irqctx->tinfo.cpu = cpu;
17004 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17005 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17006 -
17007 - per_cpu(hardirq_ctx, cpu) = irqctx;
17008 -
17009 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17010 - THREAD_FLAGS,
17011 - THREAD_ORDER));
17012 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17013 - irqctx->tinfo.cpu = cpu;
17014 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17015 -
17016 - per_cpu(softirq_ctx, cpu) = irqctx;
17017 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17018 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17019
17020 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17021 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17022 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
17023 asmlinkage void do_softirq(void)
17024 {
17025 unsigned long flags;
17026 - struct thread_info *curctx;
17027 union irq_ctx *irqctx;
17028 u32 *isp;
17029
17030 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
17031 local_irq_save(flags);
17032
17033 if (local_softirq_pending()) {
17034 - curctx = current_thread_info();
17035 irqctx = __this_cpu_read(softirq_ctx);
17036 - irqctx->tinfo.task = curctx->task;
17037 - irqctx->tinfo.previous_esp = current_stack_pointer;
17038 + irqctx->previous_esp = current_stack_pointer;
17039
17040 /* build the stack frame on the softirq stack */
17041 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17042 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17043 +
17044 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17045 + __set_fs(MAKE_MM_SEG(0));
17046 +#endif
17047
17048 call_on_stack(__do_softirq, isp);
17049 +
17050 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17051 + __set_fs(current_thread_info()->addr_limit);
17052 +#endif
17053 +
17054 /*
17055 * Shouldn't happen, we returned above if in_interrupt():
17056 */
17057 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17058 index 69bca46..0bac999 100644
17059 --- a/arch/x86/kernel/irq_64.c
17060 +++ b/arch/x86/kernel/irq_64.c
17061 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17062 #ifdef CONFIG_DEBUG_STACKOVERFLOW
17063 u64 curbase = (u64)task_stack_page(current);
17064
17065 - if (user_mode_vm(regs))
17066 + if (user_mode(regs))
17067 return;
17068
17069 WARN_ONCE(regs->sp >= curbase &&
17070 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17071 index faba577..93b9e71 100644
17072 --- a/arch/x86/kernel/kgdb.c
17073 +++ b/arch/x86/kernel/kgdb.c
17074 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17075 #ifdef CONFIG_X86_32
17076 switch (regno) {
17077 case GDB_SS:
17078 - if (!user_mode_vm(regs))
17079 + if (!user_mode(regs))
17080 *(unsigned long *)mem = __KERNEL_DS;
17081 break;
17082 case GDB_SP:
17083 - if (!user_mode_vm(regs))
17084 + if (!user_mode(regs))
17085 *(unsigned long *)mem = kernel_stack_pointer(regs);
17086 break;
17087 case GDB_GS:
17088 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17089 case 'k':
17090 /* clear the trace bit */
17091 linux_regs->flags &= ~X86_EFLAGS_TF;
17092 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17093 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17094
17095 /* set the trace bit if we're stepping */
17096 if (remcomInBuffer[0] == 's') {
17097 linux_regs->flags |= X86_EFLAGS_TF;
17098 - atomic_set(&kgdb_cpu_doing_single_step,
17099 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17100 raw_smp_processor_id());
17101 }
17102
17103 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17104
17105 switch (cmd) {
17106 case DIE_DEBUG:
17107 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17108 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17109 if (user_mode(regs))
17110 return single_step_cont(regs, args);
17111 break;
17112 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17113 index 7da647d..56fe348 100644
17114 --- a/arch/x86/kernel/kprobes.c
17115 +++ b/arch/x86/kernel/kprobes.c
17116 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17117 } __attribute__((packed)) *insn;
17118
17119 insn = (struct __arch_relative_insn *)from;
17120 +
17121 + pax_open_kernel();
17122 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17123 insn->op = op;
17124 + pax_close_kernel();
17125 }
17126
17127 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17128 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17129 kprobe_opcode_t opcode;
17130 kprobe_opcode_t *orig_opcodes = opcodes;
17131
17132 - if (search_exception_tables((unsigned long)opcodes))
17133 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17134 return 0; /* Page fault may occur on this address. */
17135
17136 retry:
17137 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17138 }
17139 }
17140 insn_get_length(&insn);
17141 + pax_open_kernel();
17142 memcpy(dest, insn.kaddr, insn.length);
17143 + pax_close_kernel();
17144
17145 #ifdef CONFIG_X86_64
17146 if (insn_rip_relative(&insn)) {
17147 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17148 (u8 *) dest;
17149 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17150 disp = (u8 *) dest + insn_offset_displacement(&insn);
17151 + pax_open_kernel();
17152 *(s32 *) disp = (s32) newdisp;
17153 + pax_close_kernel();
17154 }
17155 #endif
17156 return insn.length;
17157 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17158 */
17159 __copy_instruction(p->ainsn.insn, p->addr, 0);
17160
17161 - if (can_boost(p->addr))
17162 + if (can_boost(ktla_ktva(p->addr)))
17163 p->ainsn.boostable = 0;
17164 else
17165 p->ainsn.boostable = -1;
17166
17167 - p->opcode = *p->addr;
17168 + p->opcode = *(ktla_ktva(p->addr));
17169 }
17170
17171 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17172 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17173 * nor set current_kprobe, because it doesn't use single
17174 * stepping.
17175 */
17176 - regs->ip = (unsigned long)p->ainsn.insn;
17177 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17178 preempt_enable_no_resched();
17179 return;
17180 }
17181 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17182 if (p->opcode == BREAKPOINT_INSTRUCTION)
17183 regs->ip = (unsigned long)p->addr;
17184 else
17185 - regs->ip = (unsigned long)p->ainsn.insn;
17186 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17187 }
17188
17189 /*
17190 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17191 setup_singlestep(p, regs, kcb, 0);
17192 return 1;
17193 }
17194 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17195 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17196 /*
17197 * The breakpoint instruction was removed right
17198 * after we hit it. Another cpu has removed
17199 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17200 " movq %rax, 152(%rsp)\n"
17201 RESTORE_REGS_STRING
17202 " popfq\n"
17203 +#ifdef KERNEXEC_PLUGIN
17204 + " btsq $63,(%rsp)\n"
17205 +#endif
17206 #else
17207 " pushf\n"
17208 SAVE_REGS_STRING
17209 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17210 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17211 {
17212 unsigned long *tos = stack_addr(regs);
17213 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17214 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17215 unsigned long orig_ip = (unsigned long)p->addr;
17216 kprobe_opcode_t *insn = p->ainsn.insn;
17217
17218 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17219 struct die_args *args = data;
17220 int ret = NOTIFY_DONE;
17221
17222 - if (args->regs && user_mode_vm(args->regs))
17223 + if (args->regs && user_mode(args->regs))
17224 return ret;
17225
17226 switch (val) {
17227 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17228 * Verify if the address gap is in 2GB range, because this uses
17229 * a relative jump.
17230 */
17231 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17232 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17233 if (abs(rel) > 0x7fffffff)
17234 return -ERANGE;
17235
17236 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17237 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17238
17239 /* Set probe function call */
17240 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17241 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17242
17243 /* Set returning jmp instruction at the tail of out-of-line buffer */
17244 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17245 - (u8 *)op->kp.addr + op->optinsn.size);
17246 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17247
17248 flush_icache_range((unsigned long) buf,
17249 (unsigned long) buf + TMPL_END_IDX +
17250 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17251 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17252
17253 /* Backup instructions which will be replaced by jump address */
17254 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17255 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17256 RELATIVE_ADDR_SIZE);
17257
17258 insn_buf[0] = RELATIVEJUMP_OPCODE;
17259 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
17260 index a9c2116..a52d4fc 100644
17261 --- a/arch/x86/kernel/kvm.c
17262 +++ b/arch/x86/kernel/kvm.c
17263 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
17264 pv_mmu_ops.set_pud = kvm_set_pud;
17265 #if PAGETABLE_LEVELS == 4
17266 pv_mmu_ops.set_pgd = kvm_set_pgd;
17267 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
17268 #endif
17269 #endif
17270 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
17271 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17272 index ea69726..604d066 100644
17273 --- a/arch/x86/kernel/ldt.c
17274 +++ b/arch/x86/kernel/ldt.c
17275 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17276 if (reload) {
17277 #ifdef CONFIG_SMP
17278 preempt_disable();
17279 - load_LDT(pc);
17280 + load_LDT_nolock(pc);
17281 if (!cpumask_equal(mm_cpumask(current->mm),
17282 cpumask_of(smp_processor_id())))
17283 smp_call_function(flush_ldt, current->mm, 1);
17284 preempt_enable();
17285 #else
17286 - load_LDT(pc);
17287 + load_LDT_nolock(pc);
17288 #endif
17289 }
17290 if (oldsize) {
17291 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17292 return err;
17293
17294 for (i = 0; i < old->size; i++)
17295 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17296 + write_ldt_entry(new->ldt, i, old->ldt + i);
17297 return 0;
17298 }
17299
17300 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17301 retval = copy_ldt(&mm->context, &old_mm->context);
17302 mutex_unlock(&old_mm->context.lock);
17303 }
17304 +
17305 + if (tsk == current) {
17306 + mm->context.vdso = 0;
17307 +
17308 +#ifdef CONFIG_X86_32
17309 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17310 + mm->context.user_cs_base = 0UL;
17311 + mm->context.user_cs_limit = ~0UL;
17312 +
17313 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17314 + cpus_clear(mm->context.cpu_user_cs_mask);
17315 +#endif
17316 +
17317 +#endif
17318 +#endif
17319 +
17320 + }
17321 +
17322 return retval;
17323 }
17324
17325 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17326 }
17327 }
17328
17329 +#ifdef CONFIG_PAX_SEGMEXEC
17330 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17331 + error = -EINVAL;
17332 + goto out_unlock;
17333 + }
17334 +#endif
17335 +
17336 fill_ldt(&ldt, &ldt_info);
17337 if (oldmode)
17338 ldt.avl = 0;
17339 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
17340 index a3fa43b..8966f4c 100644
17341 --- a/arch/x86/kernel/machine_kexec_32.c
17342 +++ b/arch/x86/kernel/machine_kexec_32.c
17343 @@ -27,7 +27,7 @@
17344 #include <asm/cacheflush.h>
17345 #include <asm/debugreg.h>
17346
17347 -static void set_idt(void *newidt, __u16 limit)
17348 +static void set_idt(struct desc_struct *newidt, __u16 limit)
17349 {
17350 struct desc_ptr curidt;
17351
17352 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
17353 }
17354
17355
17356 -static void set_gdt(void *newgdt, __u16 limit)
17357 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
17358 {
17359 struct desc_ptr curgdt;
17360
17361 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
17362 }
17363
17364 control_page = page_address(image->control_code_page);
17365 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
17366 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
17367
17368 relocate_kernel_ptr = control_page;
17369 page_list[PA_CONTROL_PAGE] = __pa(control_page);
17370 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
17371 index 3ca42d0..7cff8cc 100644
17372 --- a/arch/x86/kernel/microcode_intel.c
17373 +++ b/arch/x86/kernel/microcode_intel.c
17374 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
17375
17376 static int get_ucode_user(void *to, const void *from, size_t n)
17377 {
17378 - return copy_from_user(to, from, n);
17379 + return copy_from_user(to, (const void __force_user *)from, n);
17380 }
17381
17382 static enum ucode_state
17383 request_microcode_user(int cpu, const void __user *buf, size_t size)
17384 {
17385 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
17386 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
17387 }
17388
17389 static void microcode_fini_cpu(int cpu)
17390 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
17391 index 925179f..267ac7a 100644
17392 --- a/arch/x86/kernel/module.c
17393 +++ b/arch/x86/kernel/module.c
17394 @@ -36,15 +36,60 @@
17395 #define DEBUGP(fmt...)
17396 #endif
17397
17398 -void *module_alloc(unsigned long size)
17399 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
17400 {
17401 - if (PAGE_ALIGN(size) > MODULES_LEN)
17402 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
17403 return NULL;
17404 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
17405 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
17406 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
17407 -1, __builtin_return_address(0));
17408 }
17409
17410 +void *module_alloc(unsigned long size)
17411 +{
17412 +
17413 +#ifdef CONFIG_PAX_KERNEXEC
17414 + return __module_alloc(size, PAGE_KERNEL);
17415 +#else
17416 + return __module_alloc(size, PAGE_KERNEL_EXEC);
17417 +#endif
17418 +
17419 +}
17420 +
17421 +#ifdef CONFIG_PAX_KERNEXEC
17422 +#ifdef CONFIG_X86_32
17423 +void *module_alloc_exec(unsigned long size)
17424 +{
17425 + struct vm_struct *area;
17426 +
17427 + if (size == 0)
17428 + return NULL;
17429 +
17430 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
17431 + return area ? area->addr : NULL;
17432 +}
17433 +EXPORT_SYMBOL(module_alloc_exec);
17434 +
17435 +void module_free_exec(struct module *mod, void *module_region)
17436 +{
17437 + vunmap(module_region);
17438 +}
17439 +EXPORT_SYMBOL(module_free_exec);
17440 +#else
17441 +void module_free_exec(struct module *mod, void *module_region)
17442 +{
17443 + module_free(mod, module_region);
17444 +}
17445 +EXPORT_SYMBOL(module_free_exec);
17446 +
17447 +void *module_alloc_exec(unsigned long size)
17448 +{
17449 + return __module_alloc(size, PAGE_KERNEL_RX);
17450 +}
17451 +EXPORT_SYMBOL(module_alloc_exec);
17452 +#endif
17453 +#endif
17454 +
17455 #ifdef CONFIG_X86_32
17456 int apply_relocate(Elf32_Shdr *sechdrs,
17457 const char *strtab,
17458 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17459 unsigned int i;
17460 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
17461 Elf32_Sym *sym;
17462 - uint32_t *location;
17463 + uint32_t *plocation, location;
17464
17465 DEBUGP("Applying relocate section %u to %u\n", relsec,
17466 sechdrs[relsec].sh_info);
17467 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
17468 /* This is where to make the change */
17469 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
17470 - + rel[i].r_offset;
17471 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
17472 + location = (uint32_t)plocation;
17473 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
17474 + plocation = ktla_ktva((void *)plocation);
17475 /* This is the symbol it is referring to. Note that all
17476 undefined symbols have been resolved. */
17477 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
17478 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17479 switch (ELF32_R_TYPE(rel[i].r_info)) {
17480 case R_386_32:
17481 /* We add the value into the location given */
17482 - *location += sym->st_value;
17483 + pax_open_kernel();
17484 + *plocation += sym->st_value;
17485 + pax_close_kernel();
17486 break;
17487 case R_386_PC32:
17488 /* Add the value, subtract its postition */
17489 - *location += sym->st_value - (uint32_t)location;
17490 + pax_open_kernel();
17491 + *plocation += sym->st_value - location;
17492 + pax_close_kernel();
17493 break;
17494 default:
17495 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
17496 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
17497 case R_X86_64_NONE:
17498 break;
17499 case R_X86_64_64:
17500 + pax_open_kernel();
17501 *(u64 *)loc = val;
17502 + pax_close_kernel();
17503 break;
17504 case R_X86_64_32:
17505 + pax_open_kernel();
17506 *(u32 *)loc = val;
17507 + pax_close_kernel();
17508 if (val != *(u32 *)loc)
17509 goto overflow;
17510 break;
17511 case R_X86_64_32S:
17512 + pax_open_kernel();
17513 *(s32 *)loc = val;
17514 + pax_close_kernel();
17515 if ((s64)val != *(s32 *)loc)
17516 goto overflow;
17517 break;
17518 case R_X86_64_PC32:
17519 val -= (u64)loc;
17520 + pax_open_kernel();
17521 *(u32 *)loc = val;
17522 + pax_close_kernel();
17523 +
17524 #if 0
17525 if ((s64)val != *(s32 *)loc)
17526 goto overflow;
17527 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
17528 index e88f37b..1353db6 100644
17529 --- a/arch/x86/kernel/nmi.c
17530 +++ b/arch/x86/kernel/nmi.c
17531 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
17532 dotraplinkage notrace __kprobes void
17533 do_nmi(struct pt_regs *regs, long error_code)
17534 {
17535 +
17536 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17537 + if (!user_mode(regs)) {
17538 + unsigned long cs = regs->cs & 0xFFFF;
17539 + unsigned long ip = ktva_ktla(regs->ip);
17540 +
17541 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17542 + regs->ip = ip;
17543 + }
17544 +#endif
17545 +
17546 nmi_enter();
17547
17548 inc_irq_stat(__nmi_count);
17549 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
17550 index 676b8c7..870ba04 100644
17551 --- a/arch/x86/kernel/paravirt-spinlocks.c
17552 +++ b/arch/x86/kernel/paravirt-spinlocks.c
17553 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
17554 arch_spin_lock(lock);
17555 }
17556
17557 -struct pv_lock_ops pv_lock_ops = {
17558 +struct pv_lock_ops pv_lock_ops __read_only = {
17559 #ifdef CONFIG_SMP
17560 .spin_is_locked = __ticket_spin_is_locked,
17561 .spin_is_contended = __ticket_spin_is_contended,
17562 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
17563 index d90272e..6bb013b 100644
17564 --- a/arch/x86/kernel/paravirt.c
17565 +++ b/arch/x86/kernel/paravirt.c
17566 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
17567 {
17568 return x;
17569 }
17570 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17571 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
17572 +#endif
17573
17574 void __init default_banner(void)
17575 {
17576 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
17577 if (opfunc == NULL)
17578 /* If there's no function, patch it with a ud2a (BUG) */
17579 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
17580 - else if (opfunc == _paravirt_nop)
17581 + else if (opfunc == (void *)_paravirt_nop)
17582 /* If the operation is a nop, then nop the callsite */
17583 ret = paravirt_patch_nop();
17584
17585 /* identity functions just return their single argument */
17586 - else if (opfunc == _paravirt_ident_32)
17587 + else if (opfunc == (void *)_paravirt_ident_32)
17588 ret = paravirt_patch_ident_32(insnbuf, len);
17589 - else if (opfunc == _paravirt_ident_64)
17590 + else if (opfunc == (void *)_paravirt_ident_64)
17591 ret = paravirt_patch_ident_64(insnbuf, len);
17592 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17593 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
17594 + ret = paravirt_patch_ident_64(insnbuf, len);
17595 +#endif
17596
17597 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
17598 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
17599 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
17600 if (insn_len > len || start == NULL)
17601 insn_len = len;
17602 else
17603 - memcpy(insnbuf, start, insn_len);
17604 + memcpy(insnbuf, ktla_ktva(start), insn_len);
17605
17606 return insn_len;
17607 }
17608 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
17609 preempt_enable();
17610 }
17611
17612 -struct pv_info pv_info = {
17613 +struct pv_info pv_info __read_only = {
17614 .name = "bare hardware",
17615 .paravirt_enabled = 0,
17616 .kernel_rpl = 0,
17617 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
17618 #endif
17619 };
17620
17621 -struct pv_init_ops pv_init_ops = {
17622 +struct pv_init_ops pv_init_ops __read_only = {
17623 .patch = native_patch,
17624 };
17625
17626 -struct pv_time_ops pv_time_ops = {
17627 +struct pv_time_ops pv_time_ops __read_only = {
17628 .sched_clock = native_sched_clock,
17629 .steal_clock = native_steal_clock,
17630 };
17631
17632 -struct pv_irq_ops pv_irq_ops = {
17633 +struct pv_irq_ops pv_irq_ops __read_only = {
17634 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
17635 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
17636 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
17637 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
17638 #endif
17639 };
17640
17641 -struct pv_cpu_ops pv_cpu_ops = {
17642 +struct pv_cpu_ops pv_cpu_ops __read_only = {
17643 .cpuid = native_cpuid,
17644 .get_debugreg = native_get_debugreg,
17645 .set_debugreg = native_set_debugreg,
17646 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
17647 .end_context_switch = paravirt_nop,
17648 };
17649
17650 -struct pv_apic_ops pv_apic_ops = {
17651 +struct pv_apic_ops pv_apic_ops __read_only = {
17652 #ifdef CONFIG_X86_LOCAL_APIC
17653 .startup_ipi_hook = paravirt_nop,
17654 #endif
17655 };
17656
17657 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
17658 +#ifdef CONFIG_X86_32
17659 +#ifdef CONFIG_X86_PAE
17660 +/* 64-bit pagetable entries */
17661 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
17662 +#else
17663 /* 32-bit pagetable entries */
17664 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
17665 +#endif
17666 #else
17667 /* 64-bit pagetable entries */
17668 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
17669 #endif
17670
17671 -struct pv_mmu_ops pv_mmu_ops = {
17672 +struct pv_mmu_ops pv_mmu_ops __read_only = {
17673
17674 .read_cr2 = native_read_cr2,
17675 .write_cr2 = native_write_cr2,
17676 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
17677 .make_pud = PTE_IDENT,
17678
17679 .set_pgd = native_set_pgd,
17680 + .set_pgd_batched = native_set_pgd_batched,
17681 #endif
17682 #endif /* PAGETABLE_LEVELS >= 3 */
17683
17684 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
17685 },
17686
17687 .set_fixmap = native_set_fixmap,
17688 +
17689 +#ifdef CONFIG_PAX_KERNEXEC
17690 + .pax_open_kernel = native_pax_open_kernel,
17691 + .pax_close_kernel = native_pax_close_kernel,
17692 +#endif
17693 +
17694 };
17695
17696 EXPORT_SYMBOL_GPL(pv_time_ops);
17697 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
17698 index 35ccf75..7a15747 100644
17699 --- a/arch/x86/kernel/pci-iommu_table.c
17700 +++ b/arch/x86/kernel/pci-iommu_table.c
17701 @@ -2,7 +2,7 @@
17702 #include <asm/iommu_table.h>
17703 #include <linux/string.h>
17704 #include <linux/kallsyms.h>
17705 -
17706 +#include <linux/sched.h>
17707
17708 #define DEBUG 1
17709
17710 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
17711 index ee5d4fb..426649b 100644
17712 --- a/arch/x86/kernel/process.c
17713 +++ b/arch/x86/kernel/process.c
17714 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
17715
17716 void free_thread_info(struct thread_info *ti)
17717 {
17718 - free_thread_xstate(ti->task);
17719 free_pages((unsigned long)ti, THREAD_ORDER);
17720 }
17721
17722 +static struct kmem_cache *task_struct_cachep;
17723 +
17724 void arch_task_cache_init(void)
17725 {
17726 - task_xstate_cachep =
17727 - kmem_cache_create("task_xstate", xstate_size,
17728 + /* create a slab on which task_structs can be allocated */
17729 + task_struct_cachep =
17730 + kmem_cache_create("task_struct", sizeof(struct task_struct),
17731 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
17732 +
17733 + task_xstate_cachep =
17734 + kmem_cache_create("task_xstate", xstate_size,
17735 __alignof__(union thread_xstate),
17736 - SLAB_PANIC | SLAB_NOTRACK, NULL);
17737 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
17738 +}
17739 +
17740 +struct task_struct *alloc_task_struct_node(int node)
17741 +{
17742 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
17743 +}
17744 +
17745 +void free_task_struct(struct task_struct *task)
17746 +{
17747 + free_thread_xstate(task);
17748 + kmem_cache_free(task_struct_cachep, task);
17749 }
17750
17751 /*
17752 @@ -70,7 +87,7 @@ void exit_thread(void)
17753 unsigned long *bp = t->io_bitmap_ptr;
17754
17755 if (bp) {
17756 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
17757 + struct tss_struct *tss = init_tss + get_cpu();
17758
17759 t->io_bitmap_ptr = NULL;
17760 clear_thread_flag(TIF_IO_BITMAP);
17761 @@ -106,7 +123,7 @@ void show_regs_common(void)
17762
17763 printk(KERN_CONT "\n");
17764 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
17765 - current->pid, current->comm, print_tainted(),
17766 + task_pid_nr(current), current->comm, print_tainted(),
17767 init_utsname()->release,
17768 (int)strcspn(init_utsname()->version, " "),
17769 init_utsname()->version);
17770 @@ -120,6 +137,9 @@ void flush_thread(void)
17771 {
17772 struct task_struct *tsk = current;
17773
17774 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
17775 + loadsegment(gs, 0);
17776 +#endif
17777 flush_ptrace_hw_breakpoint(tsk);
17778 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
17779 /*
17780 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
17781 regs.di = (unsigned long) arg;
17782
17783 #ifdef CONFIG_X86_32
17784 - regs.ds = __USER_DS;
17785 - regs.es = __USER_DS;
17786 + regs.ds = __KERNEL_DS;
17787 + regs.es = __KERNEL_DS;
17788 regs.fs = __KERNEL_PERCPU;
17789 - regs.gs = __KERNEL_STACK_CANARY;
17790 + savesegment(gs, regs.gs);
17791 #else
17792 regs.ss = __KERNEL_DS;
17793 #endif
17794 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
17795
17796 return ret;
17797 }
17798 -void stop_this_cpu(void *dummy)
17799 +__noreturn void stop_this_cpu(void *dummy)
17800 {
17801 local_irq_disable();
17802 /*
17803 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
17804 }
17805 early_param("idle", idle_setup);
17806
17807 -unsigned long arch_align_stack(unsigned long sp)
17808 +#ifdef CONFIG_PAX_RANDKSTACK
17809 +void pax_randomize_kstack(struct pt_regs *regs)
17810 {
17811 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
17812 - sp -= get_random_int() % 8192;
17813 - return sp & ~0xf;
17814 -}
17815 + struct thread_struct *thread = &current->thread;
17816 + unsigned long time;
17817
17818 -unsigned long arch_randomize_brk(struct mm_struct *mm)
17819 -{
17820 - unsigned long range_end = mm->brk + 0x02000000;
17821 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
17822 -}
17823 + if (!randomize_va_space)
17824 + return;
17825 +
17826 + if (v8086_mode(regs))
17827 + return;
17828
17829 + rdtscl(time);
17830 +
17831 + /* P4 seems to return a 0 LSB, ignore it */
17832 +#ifdef CONFIG_MPENTIUM4
17833 + time &= 0x3EUL;
17834 + time <<= 2;
17835 +#elif defined(CONFIG_X86_64)
17836 + time &= 0xFUL;
17837 + time <<= 4;
17838 +#else
17839 + time &= 0x1FUL;
17840 + time <<= 3;
17841 +#endif
17842 +
17843 + thread->sp0 ^= time;
17844 + load_sp0(init_tss + smp_processor_id(), thread);
17845 +
17846 +#ifdef CONFIG_X86_64
17847 + percpu_write(kernel_stack, thread->sp0);
17848 +#endif
17849 +}
17850 +#endif
17851 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
17852 index 8598296..bfadef0 100644
17853 --- a/arch/x86/kernel/process_32.c
17854 +++ b/arch/x86/kernel/process_32.c
17855 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
17856 unsigned long thread_saved_pc(struct task_struct *tsk)
17857 {
17858 return ((unsigned long *)tsk->thread.sp)[3];
17859 +//XXX return tsk->thread.eip;
17860 }
17861
17862 #ifndef CONFIG_SMP
17863 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
17864 unsigned long sp;
17865 unsigned short ss, gs;
17866
17867 - if (user_mode_vm(regs)) {
17868 + if (user_mode(regs)) {
17869 sp = regs->sp;
17870 ss = regs->ss & 0xffff;
17871 - gs = get_user_gs(regs);
17872 } else {
17873 sp = kernel_stack_pointer(regs);
17874 savesegment(ss, ss);
17875 - savesegment(gs, gs);
17876 }
17877 + gs = get_user_gs(regs);
17878
17879 show_regs_common();
17880
17881 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17882 struct task_struct *tsk;
17883 int err;
17884
17885 - childregs = task_pt_regs(p);
17886 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
17887 *childregs = *regs;
17888 childregs->ax = 0;
17889 childregs->sp = sp;
17890
17891 p->thread.sp = (unsigned long) childregs;
17892 p->thread.sp0 = (unsigned long) (childregs+1);
17893 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
17894
17895 p->thread.ip = (unsigned long) ret_from_fork;
17896
17897 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17898 struct thread_struct *prev = &prev_p->thread,
17899 *next = &next_p->thread;
17900 int cpu = smp_processor_id();
17901 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
17902 + struct tss_struct *tss = init_tss + cpu;
17903 fpu_switch_t fpu;
17904
17905 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
17906 @@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17907 */
17908 lazy_save_gs(prev->gs);
17909
17910 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17911 + __set_fs(task_thread_info(next_p)->addr_limit);
17912 +#endif
17913 +
17914 /*
17915 * Load the per-thread Thread-Local Storage descriptor.
17916 */
17917 @@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17918 */
17919 arch_end_context_switch(next_p);
17920
17921 + percpu_write(current_task, next_p);
17922 + percpu_write(current_tinfo, &next_p->tinfo);
17923 +
17924 /*
17925 * Restore %gs if needed (which is common)
17926 */
17927 @@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17928
17929 switch_fpu_finish(next_p, fpu);
17930
17931 - percpu_write(current_task, next_p);
17932 -
17933 return prev_p;
17934 }
17935
17936 @@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p)
17937 } while (count++ < 16);
17938 return 0;
17939 }
17940 -
17941 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
17942 index 6a364a6..b147d11 100644
17943 --- a/arch/x86/kernel/process_64.c
17944 +++ b/arch/x86/kernel/process_64.c
17945 @@ -89,7 +89,7 @@ static void __exit_idle(void)
17946 void exit_idle(void)
17947 {
17948 /* idle loop has pid 0 */
17949 - if (current->pid)
17950 + if (task_pid_nr(current))
17951 return;
17952 __exit_idle();
17953 }
17954 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17955 struct pt_regs *childregs;
17956 struct task_struct *me = current;
17957
17958 - childregs = ((struct pt_regs *)
17959 - (THREAD_SIZE + task_stack_page(p))) - 1;
17960 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
17961 *childregs = *regs;
17962
17963 childregs->ax = 0;
17964 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17965 p->thread.sp = (unsigned long) childregs;
17966 p->thread.sp0 = (unsigned long) (childregs+1);
17967 p->thread.usersp = me->thread.usersp;
17968 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
17969
17970 set_tsk_thread_flag(p, TIF_FORK);
17971
17972 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17973 struct thread_struct *prev = &prev_p->thread;
17974 struct thread_struct *next = &next_p->thread;
17975 int cpu = smp_processor_id();
17976 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
17977 + struct tss_struct *tss = init_tss + cpu;
17978 unsigned fsindex, gsindex;
17979 fpu_switch_t fpu;
17980
17981 @@ -461,10 +461,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17982 prev->usersp = percpu_read(old_rsp);
17983 percpu_write(old_rsp, next->usersp);
17984 percpu_write(current_task, next_p);
17985 + percpu_write(current_tinfo, &next_p->tinfo);
17986
17987 - percpu_write(kernel_stack,
17988 - (unsigned long)task_stack_page(next_p) +
17989 - THREAD_SIZE - KERNEL_STACK_OFFSET);
17990 + percpu_write(kernel_stack, next->sp0);
17991
17992 /*
17993 * Now maybe reload the debug registers and handle I/O bitmaps
17994 @@ -519,12 +518,11 @@ unsigned long get_wchan(struct task_struct *p)
17995 if (!p || p == current || p->state == TASK_RUNNING)
17996 return 0;
17997 stack = (unsigned long)task_stack_page(p);
17998 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
17999 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18000 return 0;
18001 fp = *(u64 *)(p->thread.sp);
18002 do {
18003 - if (fp < (unsigned long)stack ||
18004 - fp >= (unsigned long)stack+THREAD_SIZE)
18005 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18006 return 0;
18007 ip = *(u64 *)(fp+8);
18008 if (!in_sched_functions(ip))
18009 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18010 index 8252879..d3219e0 100644
18011 --- a/arch/x86/kernel/ptrace.c
18012 +++ b/arch/x86/kernel/ptrace.c
18013 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
18014 unsigned long addr, unsigned long data)
18015 {
18016 int ret;
18017 - unsigned long __user *datap = (unsigned long __user *)data;
18018 + unsigned long __user *datap = (__force unsigned long __user *)data;
18019
18020 switch (request) {
18021 /* read the word at location addr in the USER area. */
18022 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
18023 if ((int) addr < 0)
18024 return -EIO;
18025 ret = do_get_thread_area(child, addr,
18026 - (struct user_desc __user *)data);
18027 + (__force struct user_desc __user *) data);
18028 break;
18029
18030 case PTRACE_SET_THREAD_AREA:
18031 if ((int) addr < 0)
18032 return -EIO;
18033 ret = do_set_thread_area(child, addr,
18034 - (struct user_desc __user *)data, 0);
18035 + (__force struct user_desc __user *) data, 0);
18036 break;
18037 #endif
18038
18039 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18040 memset(info, 0, sizeof(*info));
18041 info->si_signo = SIGTRAP;
18042 info->si_code = si_code;
18043 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18044 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18045 }
18046
18047 void user_single_step_siginfo(struct task_struct *tsk,
18048 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18049 index 42eb330..139955c 100644
18050 --- a/arch/x86/kernel/pvclock.c
18051 +++ b/arch/x86/kernel/pvclock.c
18052 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18053 return pv_tsc_khz;
18054 }
18055
18056 -static atomic64_t last_value = ATOMIC64_INIT(0);
18057 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18058
18059 void pvclock_resume(void)
18060 {
18061 - atomic64_set(&last_value, 0);
18062 + atomic64_set_unchecked(&last_value, 0);
18063 }
18064
18065 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18066 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18067 * updating at the same time, and one of them could be slightly behind,
18068 * making the assumption that last_value always go forward fail to hold.
18069 */
18070 - last = atomic64_read(&last_value);
18071 + last = atomic64_read_unchecked(&last_value);
18072 do {
18073 if (ret < last)
18074 return last;
18075 - last = atomic64_cmpxchg(&last_value, last, ret);
18076 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18077 } while (unlikely(last != ret));
18078
18079 return ret;
18080 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18081 index 37a458b..e63d183 100644
18082 --- a/arch/x86/kernel/reboot.c
18083 +++ b/arch/x86/kernel/reboot.c
18084 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18085 EXPORT_SYMBOL(pm_power_off);
18086
18087 static const struct desc_ptr no_idt = {};
18088 -static int reboot_mode;
18089 +static unsigned short reboot_mode;
18090 enum reboot_type reboot_type = BOOT_ACPI;
18091 int reboot_force;
18092
18093 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
18094 extern const unsigned char machine_real_restart_asm[];
18095 extern const u64 machine_real_restart_gdt[3];
18096
18097 -void machine_real_restart(unsigned int type)
18098 +__noreturn void machine_real_restart(unsigned int type)
18099 {
18100 void *restart_va;
18101 unsigned long restart_pa;
18102 - void (*restart_lowmem)(unsigned int);
18103 + void (* __noreturn restart_lowmem)(unsigned int);
18104 u64 *lowmem_gdt;
18105
18106 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18107 + struct desc_struct *gdt;
18108 +#endif
18109 +
18110 local_irq_disable();
18111
18112 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18113 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
18114 boot)". This seems like a fairly standard thing that gets set by
18115 REBOOT.COM programs, and the previous reset routine did this
18116 too. */
18117 - *((unsigned short *)0x472) = reboot_mode;
18118 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18119
18120 /* Patch the GDT in the low memory trampoline */
18121 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18122
18123 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18124 restart_pa = virt_to_phys(restart_va);
18125 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18126 + restart_lowmem = (void *)restart_pa;
18127
18128 /* GDT[0]: GDT self-pointer */
18129 lowmem_gdt[0] =
18130 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
18131 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18132
18133 /* Jump to the identity-mapped low memory code */
18134 +
18135 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18136 + gdt = get_cpu_gdt_table(smp_processor_id());
18137 + pax_open_kernel();
18138 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18139 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18140 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18141 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18142 +#endif
18143 +#ifdef CONFIG_PAX_KERNEXEC
18144 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18145 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18146 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18147 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18148 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18149 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18150 +#endif
18151 + pax_close_kernel();
18152 +#endif
18153 +
18154 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18155 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18156 + unreachable();
18157 +#else
18158 restart_lowmem(type);
18159 +#endif
18160 +
18161 }
18162 #ifdef CONFIG_APM_MODULE
18163 EXPORT_SYMBOL(machine_real_restart);
18164 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18165 * try to force a triple fault and then cycle between hitting the keyboard
18166 * controller and doing that
18167 */
18168 -static void native_machine_emergency_restart(void)
18169 +__noreturn static void native_machine_emergency_restart(void)
18170 {
18171 int i;
18172 int attempt = 0;
18173 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
18174 #endif
18175 }
18176
18177 -static void __machine_emergency_restart(int emergency)
18178 +static __noreturn void __machine_emergency_restart(int emergency)
18179 {
18180 reboot_emergency = emergency;
18181 machine_ops.emergency_restart();
18182 }
18183
18184 -static void native_machine_restart(char *__unused)
18185 +static __noreturn void native_machine_restart(char *__unused)
18186 {
18187 printk("machine restart\n");
18188
18189 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
18190 __machine_emergency_restart(0);
18191 }
18192
18193 -static void native_machine_halt(void)
18194 +static __noreturn void native_machine_halt(void)
18195 {
18196 /* stop other cpus and apics */
18197 machine_shutdown();
18198 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
18199 stop_this_cpu(NULL);
18200 }
18201
18202 -static void native_machine_power_off(void)
18203 +__noreturn static void native_machine_power_off(void)
18204 {
18205 if (pm_power_off) {
18206 if (!reboot_force)
18207 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
18208 }
18209 /* a fallback in case there is no PM info available */
18210 tboot_shutdown(TB_SHUTDOWN_HALT);
18211 + unreachable();
18212 }
18213
18214 struct machine_ops machine_ops = {
18215 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18216 index 7a6f3b3..bed145d7 100644
18217 --- a/arch/x86/kernel/relocate_kernel_64.S
18218 +++ b/arch/x86/kernel/relocate_kernel_64.S
18219 @@ -11,6 +11,7 @@
18220 #include <asm/kexec.h>
18221 #include <asm/processor-flags.h>
18222 #include <asm/pgtable_types.h>
18223 +#include <asm/alternative-asm.h>
18224
18225 /*
18226 * Must be relocatable PIC code callable as a C function
18227 @@ -160,13 +161,14 @@ identity_mapped:
18228 xorq %rbp, %rbp
18229 xorq %r8, %r8
18230 xorq %r9, %r9
18231 - xorq %r10, %r9
18232 + xorq %r10, %r10
18233 xorq %r11, %r11
18234 xorq %r12, %r12
18235 xorq %r13, %r13
18236 xorq %r14, %r14
18237 xorq %r15, %r15
18238
18239 + pax_force_retaddr 0, 1
18240 ret
18241
18242 1:
18243 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18244 index cf0ef98..e3f780b 100644
18245 --- a/arch/x86/kernel/setup.c
18246 +++ b/arch/x86/kernel/setup.c
18247 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
18248
18249 switch (data->type) {
18250 case SETUP_E820_EXT:
18251 - parse_e820_ext(data);
18252 + parse_e820_ext((struct setup_data __force_kernel *)data);
18253 break;
18254 case SETUP_DTB:
18255 add_dtb(pa_data);
18256 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
18257 * area (640->1Mb) as ram even though it is not.
18258 * take them out.
18259 */
18260 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18261 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18262 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18263 }
18264
18265 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
18266
18267 if (!boot_params.hdr.root_flags)
18268 root_mountflags &= ~MS_RDONLY;
18269 - init_mm.start_code = (unsigned long) _text;
18270 - init_mm.end_code = (unsigned long) _etext;
18271 + init_mm.start_code = ktla_ktva((unsigned long) _text);
18272 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
18273 init_mm.end_data = (unsigned long) _edata;
18274 init_mm.brk = _brk_end;
18275
18276 - code_resource.start = virt_to_phys(_text);
18277 - code_resource.end = virt_to_phys(_etext)-1;
18278 - data_resource.start = virt_to_phys(_etext);
18279 + code_resource.start = virt_to_phys(ktla_ktva(_text));
18280 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18281 + data_resource.start = virt_to_phys(_sdata);
18282 data_resource.end = virt_to_phys(_edata)-1;
18283 bss_resource.start = virt_to_phys(&__bss_start);
18284 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18285 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18286 index 71f4727..16dc9f7 100644
18287 --- a/arch/x86/kernel/setup_percpu.c
18288 +++ b/arch/x86/kernel/setup_percpu.c
18289 @@ -21,19 +21,17 @@
18290 #include <asm/cpu.h>
18291 #include <asm/stackprotector.h>
18292
18293 -DEFINE_PER_CPU(int, cpu_number);
18294 +#ifdef CONFIG_SMP
18295 +DEFINE_PER_CPU(unsigned int, cpu_number);
18296 EXPORT_PER_CPU_SYMBOL(cpu_number);
18297 +#endif
18298
18299 -#ifdef CONFIG_X86_64
18300 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
18301 -#else
18302 -#define BOOT_PERCPU_OFFSET 0
18303 -#endif
18304
18305 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
18306 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
18307
18308 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
18309 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
18310 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
18311 };
18312 EXPORT_SYMBOL(__per_cpu_offset);
18313 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
18314 {
18315 #ifdef CONFIG_X86_32
18316 struct desc_struct gdt;
18317 + unsigned long base = per_cpu_offset(cpu);
18318
18319 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
18320 - 0x2 | DESCTYPE_S, 0x8);
18321 - gdt.s = 1;
18322 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
18323 + 0x83 | DESCTYPE_S, 0xC);
18324 write_gdt_entry(get_cpu_gdt_table(cpu),
18325 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
18326 #endif
18327 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
18328 /* alrighty, percpu areas up and running */
18329 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
18330 for_each_possible_cpu(cpu) {
18331 +#ifdef CONFIG_CC_STACKPROTECTOR
18332 +#ifdef CONFIG_X86_32
18333 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
18334 +#endif
18335 +#endif
18336 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
18337 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
18338 per_cpu(cpu_number, cpu) = cpu;
18339 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
18340 */
18341 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
18342 #endif
18343 +#ifdef CONFIG_CC_STACKPROTECTOR
18344 +#ifdef CONFIG_X86_32
18345 + if (!cpu)
18346 + per_cpu(stack_canary.canary, cpu) = canary;
18347 +#endif
18348 +#endif
18349 /*
18350 * Up to this point, the boot CPU has been using .init.data
18351 * area. Reload any changed state for the boot CPU.
18352 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
18353 index 54ddaeb2..22c3bdc 100644
18354 --- a/arch/x86/kernel/signal.c
18355 +++ b/arch/x86/kernel/signal.c
18356 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
18357 * Align the stack pointer according to the i386 ABI,
18358 * i.e. so that on function entry ((sp + 4) & 15) == 0.
18359 */
18360 - sp = ((sp + 4) & -16ul) - 4;
18361 + sp = ((sp - 12) & -16ul) - 4;
18362 #else /* !CONFIG_X86_32 */
18363 sp = round_down(sp, 16) - 8;
18364 #endif
18365 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
18366 * Return an always-bogus address instead so we will die with SIGSEGV.
18367 */
18368 if (onsigstack && !likely(on_sig_stack(sp)))
18369 - return (void __user *)-1L;
18370 + return (__force void __user *)-1L;
18371
18372 /* save i387 state */
18373 if (used_math() && save_i387_xstate(*fpstate) < 0)
18374 - return (void __user *)-1L;
18375 + return (__force void __user *)-1L;
18376
18377 return (void __user *)sp;
18378 }
18379 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18380 }
18381
18382 if (current->mm->context.vdso)
18383 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18384 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18385 else
18386 - restorer = &frame->retcode;
18387 + restorer = (void __user *)&frame->retcode;
18388 if (ka->sa.sa_flags & SA_RESTORER)
18389 restorer = ka->sa.sa_restorer;
18390
18391 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18392 * reasons and because gdb uses it as a signature to notice
18393 * signal handler stack frames.
18394 */
18395 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
18396 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
18397
18398 if (err)
18399 return -EFAULT;
18400 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18401 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
18402
18403 /* Set up to return from userspace. */
18404 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18405 + if (current->mm->context.vdso)
18406 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18407 + else
18408 + restorer = (void __user *)&frame->retcode;
18409 if (ka->sa.sa_flags & SA_RESTORER)
18410 restorer = ka->sa.sa_restorer;
18411 put_user_ex(restorer, &frame->pretcode);
18412 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18413 * reasons and because gdb uses it as a signature to notice
18414 * signal handler stack frames.
18415 */
18416 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
18417 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
18418 } put_user_catch(err);
18419
18420 if (err)
18421 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
18422 * X86_32: vm86 regs switched out by assembly code before reaching
18423 * here, so testing against kernel CS suffices.
18424 */
18425 - if (!user_mode(regs))
18426 + if (!user_mode_novm(regs))
18427 return;
18428
18429 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
18430 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18431 index 9f548cb..caf76f7 100644
18432 --- a/arch/x86/kernel/smpboot.c
18433 +++ b/arch/x86/kernel/smpboot.c
18434 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
18435 set_idle_for_cpu(cpu, c_idle.idle);
18436 do_rest:
18437 per_cpu(current_task, cpu) = c_idle.idle;
18438 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
18439 #ifdef CONFIG_X86_32
18440 /* Stack for startup_32 can be just as for start_secondary onwards */
18441 irq_ctx_init(cpu);
18442 #else
18443 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
18444 initial_gs = per_cpu_offset(cpu);
18445 - per_cpu(kernel_stack, cpu) =
18446 - (unsigned long)task_stack_page(c_idle.idle) -
18447 - KERNEL_STACK_OFFSET + THREAD_SIZE;
18448 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
18449 #endif
18450 +
18451 + pax_open_kernel();
18452 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18453 + pax_close_kernel();
18454 +
18455 initial_code = (unsigned long)start_secondary;
18456 stack_start = c_idle.idle->thread.sp;
18457
18458 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
18459
18460 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
18461
18462 +#ifdef CONFIG_PAX_PER_CPU_PGD
18463 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
18464 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18465 + KERNEL_PGD_PTRS);
18466 +#endif
18467 +
18468 err = do_boot_cpu(apicid, cpu);
18469 if (err) {
18470 pr_debug("do_boot_cpu failed %d\n", err);
18471 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
18472 index c346d11..d43b163 100644
18473 --- a/arch/x86/kernel/step.c
18474 +++ b/arch/x86/kernel/step.c
18475 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18476 struct desc_struct *desc;
18477 unsigned long base;
18478
18479 - seg &= ~7UL;
18480 + seg >>= 3;
18481
18482 mutex_lock(&child->mm->context.lock);
18483 - if (unlikely((seg >> 3) >= child->mm->context.size))
18484 + if (unlikely(seg >= child->mm->context.size))
18485 addr = -1L; /* bogus selector, access would fault */
18486 else {
18487 desc = child->mm->context.ldt + seg;
18488 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18489 addr += base;
18490 }
18491 mutex_unlock(&child->mm->context.lock);
18492 - }
18493 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
18494 + addr = ktla_ktva(addr);
18495
18496 return addr;
18497 }
18498 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
18499 unsigned char opcode[15];
18500 unsigned long addr = convert_ip_to_linear(child, regs);
18501
18502 + if (addr == -EINVAL)
18503 + return 0;
18504 +
18505 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
18506 for (i = 0; i < copied; i++) {
18507 switch (opcode[i]) {
18508 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
18509 index 0b0cb5f..db6b9ed 100644
18510 --- a/arch/x86/kernel/sys_i386_32.c
18511 +++ b/arch/x86/kernel/sys_i386_32.c
18512 @@ -24,17 +24,224 @@
18513
18514 #include <asm/syscalls.h>
18515
18516 -/*
18517 - * Do a system call from kernel instead of calling sys_execve so we
18518 - * end up with proper pt_regs.
18519 - */
18520 -int kernel_execve(const char *filename,
18521 - const char *const argv[],
18522 - const char *const envp[])
18523 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
18524 {
18525 - long __res;
18526 - asm volatile ("int $0x80"
18527 - : "=a" (__res)
18528 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
18529 - return __res;
18530 + unsigned long pax_task_size = TASK_SIZE;
18531 +
18532 +#ifdef CONFIG_PAX_SEGMEXEC
18533 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
18534 + pax_task_size = SEGMEXEC_TASK_SIZE;
18535 +#endif
18536 +
18537 + if (len > pax_task_size || addr > pax_task_size - len)
18538 + return -EINVAL;
18539 +
18540 + return 0;
18541 +}
18542 +
18543 +unsigned long
18544 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
18545 + unsigned long len, unsigned long pgoff, unsigned long flags)
18546 +{
18547 + struct mm_struct *mm = current->mm;
18548 + struct vm_area_struct *vma;
18549 + unsigned long start_addr, pax_task_size = TASK_SIZE;
18550 +
18551 +#ifdef CONFIG_PAX_SEGMEXEC
18552 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18553 + pax_task_size = SEGMEXEC_TASK_SIZE;
18554 +#endif
18555 +
18556 + pax_task_size -= PAGE_SIZE;
18557 +
18558 + if (len > pax_task_size)
18559 + return -ENOMEM;
18560 +
18561 + if (flags & MAP_FIXED)
18562 + return addr;
18563 +
18564 +#ifdef CONFIG_PAX_RANDMMAP
18565 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18566 +#endif
18567 +
18568 + if (addr) {
18569 + addr = PAGE_ALIGN(addr);
18570 + if (pax_task_size - len >= addr) {
18571 + vma = find_vma(mm, addr);
18572 + if (check_heap_stack_gap(vma, addr, len))
18573 + return addr;
18574 + }
18575 + }
18576 + if (len > mm->cached_hole_size) {
18577 + start_addr = addr = mm->free_area_cache;
18578 + } else {
18579 + start_addr = addr = mm->mmap_base;
18580 + mm->cached_hole_size = 0;
18581 + }
18582 +
18583 +#ifdef CONFIG_PAX_PAGEEXEC
18584 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
18585 + start_addr = 0x00110000UL;
18586 +
18587 +#ifdef CONFIG_PAX_RANDMMAP
18588 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18589 + start_addr += mm->delta_mmap & 0x03FFF000UL;
18590 +#endif
18591 +
18592 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
18593 + start_addr = addr = mm->mmap_base;
18594 + else
18595 + addr = start_addr;
18596 + }
18597 +#endif
18598 +
18599 +full_search:
18600 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
18601 + /* At this point: (!vma || addr < vma->vm_end). */
18602 + if (pax_task_size - len < addr) {
18603 + /*
18604 + * Start a new search - just in case we missed
18605 + * some holes.
18606 + */
18607 + if (start_addr != mm->mmap_base) {
18608 + start_addr = addr = mm->mmap_base;
18609 + mm->cached_hole_size = 0;
18610 + goto full_search;
18611 + }
18612 + return -ENOMEM;
18613 + }
18614 + if (check_heap_stack_gap(vma, addr, len))
18615 + break;
18616 + if (addr + mm->cached_hole_size < vma->vm_start)
18617 + mm->cached_hole_size = vma->vm_start - addr;
18618 + addr = vma->vm_end;
18619 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
18620 + start_addr = addr = mm->mmap_base;
18621 + mm->cached_hole_size = 0;
18622 + goto full_search;
18623 + }
18624 + }
18625 +
18626 + /*
18627 + * Remember the place where we stopped the search:
18628 + */
18629 + mm->free_area_cache = addr + len;
18630 + return addr;
18631 +}
18632 +
18633 +unsigned long
18634 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18635 + const unsigned long len, const unsigned long pgoff,
18636 + const unsigned long flags)
18637 +{
18638 + struct vm_area_struct *vma;
18639 + struct mm_struct *mm = current->mm;
18640 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
18641 +
18642 +#ifdef CONFIG_PAX_SEGMEXEC
18643 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18644 + pax_task_size = SEGMEXEC_TASK_SIZE;
18645 +#endif
18646 +
18647 + pax_task_size -= PAGE_SIZE;
18648 +
18649 + /* requested length too big for entire address space */
18650 + if (len > pax_task_size)
18651 + return -ENOMEM;
18652 +
18653 + if (flags & MAP_FIXED)
18654 + return addr;
18655 +
18656 +#ifdef CONFIG_PAX_PAGEEXEC
18657 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
18658 + goto bottomup;
18659 +#endif
18660 +
18661 +#ifdef CONFIG_PAX_RANDMMAP
18662 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18663 +#endif
18664 +
18665 + /* requesting a specific address */
18666 + if (addr) {
18667 + addr = PAGE_ALIGN(addr);
18668 + if (pax_task_size - len >= addr) {
18669 + vma = find_vma(mm, addr);
18670 + if (check_heap_stack_gap(vma, addr, len))
18671 + return addr;
18672 + }
18673 + }
18674 +
18675 + /* check if free_area_cache is useful for us */
18676 + if (len <= mm->cached_hole_size) {
18677 + mm->cached_hole_size = 0;
18678 + mm->free_area_cache = mm->mmap_base;
18679 + }
18680 +
18681 + /* either no address requested or can't fit in requested address hole */
18682 + addr = mm->free_area_cache;
18683 +
18684 + /* make sure it can fit in the remaining address space */
18685 + if (addr > len) {
18686 + vma = find_vma(mm, addr-len);
18687 + if (check_heap_stack_gap(vma, addr - len, len))
18688 + /* remember the address as a hint for next time */
18689 + return (mm->free_area_cache = addr-len);
18690 + }
18691 +
18692 + if (mm->mmap_base < len)
18693 + goto bottomup;
18694 +
18695 + addr = mm->mmap_base-len;
18696 +
18697 + do {
18698 + /*
18699 + * Lookup failure means no vma is above this address,
18700 + * else if new region fits below vma->vm_start,
18701 + * return with success:
18702 + */
18703 + vma = find_vma(mm, addr);
18704 + if (check_heap_stack_gap(vma, addr, len))
18705 + /* remember the address as a hint for next time */
18706 + return (mm->free_area_cache = addr);
18707 +
18708 + /* remember the largest hole we saw so far */
18709 + if (addr + mm->cached_hole_size < vma->vm_start)
18710 + mm->cached_hole_size = vma->vm_start - addr;
18711 +
18712 + /* try just below the current vma->vm_start */
18713 + addr = skip_heap_stack_gap(vma, len);
18714 + } while (!IS_ERR_VALUE(addr));
18715 +
18716 +bottomup:
18717 + /*
18718 + * A failed mmap() very likely causes application failure,
18719 + * so fall back to the bottom-up function here. This scenario
18720 + * can happen with large stack limits and large mmap()
18721 + * allocations.
18722 + */
18723 +
18724 +#ifdef CONFIG_PAX_SEGMEXEC
18725 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18726 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
18727 + else
18728 +#endif
18729 +
18730 + mm->mmap_base = TASK_UNMAPPED_BASE;
18731 +
18732 +#ifdef CONFIG_PAX_RANDMMAP
18733 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18734 + mm->mmap_base += mm->delta_mmap;
18735 +#endif
18736 +
18737 + mm->free_area_cache = mm->mmap_base;
18738 + mm->cached_hole_size = ~0UL;
18739 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18740 + /*
18741 + * Restore the topdown base:
18742 + */
18743 + mm->mmap_base = base;
18744 + mm->free_area_cache = base;
18745 + mm->cached_hole_size = ~0UL;
18746 +
18747 + return addr;
18748 }
18749 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
18750 index 0514890..3dbebce 100644
18751 --- a/arch/x86/kernel/sys_x86_64.c
18752 +++ b/arch/x86/kernel/sys_x86_64.c
18753 @@ -95,8 +95,8 @@ out:
18754 return error;
18755 }
18756
18757 -static void find_start_end(unsigned long flags, unsigned long *begin,
18758 - unsigned long *end)
18759 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
18760 + unsigned long *begin, unsigned long *end)
18761 {
18762 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
18763 unsigned long new_begin;
18764 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
18765 *begin = new_begin;
18766 }
18767 } else {
18768 - *begin = TASK_UNMAPPED_BASE;
18769 + *begin = mm->mmap_base;
18770 *end = TASK_SIZE;
18771 }
18772 }
18773 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
18774 if (flags & MAP_FIXED)
18775 return addr;
18776
18777 - find_start_end(flags, &begin, &end);
18778 + find_start_end(mm, flags, &begin, &end);
18779
18780 if (len > end)
18781 return -ENOMEM;
18782
18783 +#ifdef CONFIG_PAX_RANDMMAP
18784 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18785 +#endif
18786 +
18787 if (addr) {
18788 addr = PAGE_ALIGN(addr);
18789 vma = find_vma(mm, addr);
18790 - if (end - len >= addr &&
18791 - (!vma || addr + len <= vma->vm_start))
18792 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
18793 return addr;
18794 }
18795 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
18796 @@ -172,7 +175,7 @@ full_search:
18797 }
18798 return -ENOMEM;
18799 }
18800 - if (!vma || addr + len <= vma->vm_start) {
18801 + if (check_heap_stack_gap(vma, addr, len)) {
18802 /*
18803 * Remember the place where we stopped the search:
18804 */
18805 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18806 {
18807 struct vm_area_struct *vma;
18808 struct mm_struct *mm = current->mm;
18809 - unsigned long addr = addr0;
18810 + unsigned long base = mm->mmap_base, addr = addr0;
18811
18812 /* requested length too big for entire address space */
18813 if (len > TASK_SIZE)
18814 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18815 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
18816 goto bottomup;
18817
18818 +#ifdef CONFIG_PAX_RANDMMAP
18819 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18820 +#endif
18821 +
18822 /* requesting a specific address */
18823 if (addr) {
18824 addr = PAGE_ALIGN(addr);
18825 - vma = find_vma(mm, addr);
18826 - if (TASK_SIZE - len >= addr &&
18827 - (!vma || addr + len <= vma->vm_start))
18828 - return addr;
18829 + if (TASK_SIZE - len >= addr) {
18830 + vma = find_vma(mm, addr);
18831 + if (check_heap_stack_gap(vma, addr, len))
18832 + return addr;
18833 + }
18834 }
18835
18836 /* check if free_area_cache is useful for us */
18837 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18838 ALIGN_TOPDOWN);
18839
18840 vma = find_vma(mm, tmp_addr);
18841 - if (!vma || tmp_addr + len <= vma->vm_start)
18842 + if (check_heap_stack_gap(vma, tmp_addr, len))
18843 /* remember the address as a hint for next time */
18844 return mm->free_area_cache = tmp_addr;
18845 }
18846 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18847 * return with success:
18848 */
18849 vma = find_vma(mm, addr);
18850 - if (!vma || addr+len <= vma->vm_start)
18851 + if (check_heap_stack_gap(vma, addr, len))
18852 /* remember the address as a hint for next time */
18853 return mm->free_area_cache = addr;
18854
18855 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18856 mm->cached_hole_size = vma->vm_start - addr;
18857
18858 /* try just below the current vma->vm_start */
18859 - addr = vma->vm_start-len;
18860 - } while (len < vma->vm_start);
18861 + addr = skip_heap_stack_gap(vma, len);
18862 + } while (!IS_ERR_VALUE(addr));
18863
18864 bottomup:
18865 /*
18866 @@ -270,13 +278,21 @@ bottomup:
18867 * can happen with large stack limits and large mmap()
18868 * allocations.
18869 */
18870 + mm->mmap_base = TASK_UNMAPPED_BASE;
18871 +
18872 +#ifdef CONFIG_PAX_RANDMMAP
18873 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18874 + mm->mmap_base += mm->delta_mmap;
18875 +#endif
18876 +
18877 + mm->free_area_cache = mm->mmap_base;
18878 mm->cached_hole_size = ~0UL;
18879 - mm->free_area_cache = TASK_UNMAPPED_BASE;
18880 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18881 /*
18882 * Restore the topdown base:
18883 */
18884 - mm->free_area_cache = mm->mmap_base;
18885 + mm->mmap_base = base;
18886 + mm->free_area_cache = base;
18887 mm->cached_hole_size = ~0UL;
18888
18889 return addr;
18890 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
18891 index 9a0e312..e6f66f2 100644
18892 --- a/arch/x86/kernel/syscall_table_32.S
18893 +++ b/arch/x86/kernel/syscall_table_32.S
18894 @@ -1,3 +1,4 @@
18895 +.section .rodata,"a",@progbits
18896 ENTRY(sys_call_table)
18897 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
18898 .long sys_exit
18899 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
18900 index e2410e2..4fe3fbc 100644
18901 --- a/arch/x86/kernel/tboot.c
18902 +++ b/arch/x86/kernel/tboot.c
18903 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
18904
18905 void tboot_shutdown(u32 shutdown_type)
18906 {
18907 - void (*shutdown)(void);
18908 + void (* __noreturn shutdown)(void);
18909
18910 if (!tboot_enabled())
18911 return;
18912 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
18913
18914 switch_to_tboot_pt();
18915
18916 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
18917 + shutdown = (void *)tboot->shutdown_entry;
18918 shutdown();
18919
18920 /* should not reach here */
18921 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
18922 tboot_shutdown(acpi_shutdown_map[sleep_state]);
18923 }
18924
18925 -static atomic_t ap_wfs_count;
18926 +static atomic_unchecked_t ap_wfs_count;
18927
18928 static int tboot_wait_for_aps(int num_aps)
18929 {
18930 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
18931 {
18932 switch (action) {
18933 case CPU_DYING:
18934 - atomic_inc(&ap_wfs_count);
18935 + atomic_inc_unchecked(&ap_wfs_count);
18936 if (num_online_cpus() == 1)
18937 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
18938 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
18939 return NOTIFY_BAD;
18940 break;
18941 }
18942 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
18943
18944 tboot_create_trampoline();
18945
18946 - atomic_set(&ap_wfs_count, 0);
18947 + atomic_set_unchecked(&ap_wfs_count, 0);
18948 register_hotcpu_notifier(&tboot_cpu_notifier);
18949 return 0;
18950 }
18951 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
18952 index dd5fbf4..b7f2232 100644
18953 --- a/arch/x86/kernel/time.c
18954 +++ b/arch/x86/kernel/time.c
18955 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
18956 {
18957 unsigned long pc = instruction_pointer(regs);
18958
18959 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
18960 + if (!user_mode(regs) && in_lock_functions(pc)) {
18961 #ifdef CONFIG_FRAME_POINTER
18962 - return *(unsigned long *)(regs->bp + sizeof(long));
18963 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
18964 #else
18965 unsigned long *sp =
18966 (unsigned long *)kernel_stack_pointer(regs);
18967 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
18968 * or above a saved flags. Eflags has bits 22-31 zero,
18969 * kernel addresses don't.
18970 */
18971 +
18972 +#ifdef CONFIG_PAX_KERNEXEC
18973 + return ktla_ktva(sp[0]);
18974 +#else
18975 if (sp[0] >> 22)
18976 return sp[0];
18977 if (sp[1] >> 22)
18978 return sp[1];
18979 #endif
18980 +
18981 +#endif
18982 }
18983 return pc;
18984 }
18985 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
18986 index 6bb7b85..dd853e1 100644
18987 --- a/arch/x86/kernel/tls.c
18988 +++ b/arch/x86/kernel/tls.c
18989 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
18990 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
18991 return -EINVAL;
18992
18993 +#ifdef CONFIG_PAX_SEGMEXEC
18994 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
18995 + return -EINVAL;
18996 +#endif
18997 +
18998 set_tls_desc(p, idx, &info, 1);
18999
19000 return 0;
19001 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19002 index 451c0a7..e57f551 100644
19003 --- a/arch/x86/kernel/trampoline_32.S
19004 +++ b/arch/x86/kernel/trampoline_32.S
19005 @@ -32,6 +32,12 @@
19006 #include <asm/segment.h>
19007 #include <asm/page_types.h>
19008
19009 +#ifdef CONFIG_PAX_KERNEXEC
19010 +#define ta(X) (X)
19011 +#else
19012 +#define ta(X) ((X) - __PAGE_OFFSET)
19013 +#endif
19014 +
19015 #ifdef CONFIG_SMP
19016
19017 .section ".x86_trampoline","a"
19018 @@ -62,7 +68,7 @@ r_base = .
19019 inc %ax # protected mode (PE) bit
19020 lmsw %ax # into protected mode
19021 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19022 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19023 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19024
19025 # These need to be in the same 64K segment as the above;
19026 # hence we don't use the boot_gdt_descr defined in head.S
19027 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19028 index 09ff517..df19fbff 100644
19029 --- a/arch/x86/kernel/trampoline_64.S
19030 +++ b/arch/x86/kernel/trampoline_64.S
19031 @@ -90,7 +90,7 @@ startup_32:
19032 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19033 movl %eax, %ds
19034
19035 - movl $X86_CR4_PAE, %eax
19036 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19037 movl %eax, %cr4 # Enable PAE mode
19038
19039 # Setup trampoline 4 level pagetables
19040 @@ -138,7 +138,7 @@ tidt:
19041 # so the kernel can live anywhere
19042 .balign 4
19043 tgdt:
19044 - .short tgdt_end - tgdt # gdt limit
19045 + .short tgdt_end - tgdt - 1 # gdt limit
19046 .long tgdt - r_base
19047 .short 0
19048 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19049 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19050 index 31d9d0f..e244dd9 100644
19051 --- a/arch/x86/kernel/traps.c
19052 +++ b/arch/x86/kernel/traps.c
19053 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19054
19055 /* Do we ignore FPU interrupts ? */
19056 char ignore_fpu_irq;
19057 -
19058 -/*
19059 - * The IDT has to be page-aligned to simplify the Pentium
19060 - * F0 0F bug workaround.
19061 - */
19062 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19063 #endif
19064
19065 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19066 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19067 }
19068
19069 static void __kprobes
19070 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19071 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19072 long error_code, siginfo_t *info)
19073 {
19074 struct task_struct *tsk = current;
19075
19076 #ifdef CONFIG_X86_32
19077 - if (regs->flags & X86_VM_MASK) {
19078 + if (v8086_mode(regs)) {
19079 /*
19080 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19081 * On nmi (interrupt 2), do_trap should not be called.
19082 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19083 }
19084 #endif
19085
19086 - if (!user_mode(regs))
19087 + if (!user_mode_novm(regs))
19088 goto kernel_trap;
19089
19090 #ifdef CONFIG_X86_32
19091 @@ -148,7 +142,7 @@ trap_signal:
19092 printk_ratelimit()) {
19093 printk(KERN_INFO
19094 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19095 - tsk->comm, tsk->pid, str,
19096 + tsk->comm, task_pid_nr(tsk), str,
19097 regs->ip, regs->sp, error_code);
19098 print_vma_addr(" in ", regs->ip);
19099 printk("\n");
19100 @@ -165,8 +159,20 @@ kernel_trap:
19101 if (!fixup_exception(regs)) {
19102 tsk->thread.error_code = error_code;
19103 tsk->thread.trap_no = trapnr;
19104 +
19105 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19106 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19107 + str = "PAX: suspicious stack segment fault";
19108 +#endif
19109 +
19110 die(str, regs, error_code);
19111 }
19112 +
19113 +#ifdef CONFIG_PAX_REFCOUNT
19114 + if (trapnr == 4)
19115 + pax_report_refcount_overflow(regs);
19116 +#endif
19117 +
19118 return;
19119
19120 #ifdef CONFIG_X86_32
19121 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19122 conditional_sti(regs);
19123
19124 #ifdef CONFIG_X86_32
19125 - if (regs->flags & X86_VM_MASK)
19126 + if (v8086_mode(regs))
19127 goto gp_in_vm86;
19128 #endif
19129
19130 tsk = current;
19131 - if (!user_mode(regs))
19132 + if (!user_mode_novm(regs))
19133 goto gp_in_kernel;
19134
19135 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19136 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19137 + struct mm_struct *mm = tsk->mm;
19138 + unsigned long limit;
19139 +
19140 + down_write(&mm->mmap_sem);
19141 + limit = mm->context.user_cs_limit;
19142 + if (limit < TASK_SIZE) {
19143 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19144 + up_write(&mm->mmap_sem);
19145 + return;
19146 + }
19147 + up_write(&mm->mmap_sem);
19148 + }
19149 +#endif
19150 +
19151 tsk->thread.error_code = error_code;
19152 tsk->thread.trap_no = 13;
19153
19154 @@ -295,6 +317,13 @@ gp_in_kernel:
19155 if (notify_die(DIE_GPF, "general protection fault", regs,
19156 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19157 return;
19158 +
19159 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19160 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19161 + die("PAX: suspicious general protection fault", regs, error_code);
19162 + else
19163 +#endif
19164 +
19165 die("general protection fault", regs, error_code);
19166 }
19167
19168 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19169 /* It's safe to allow irq's after DR6 has been saved */
19170 preempt_conditional_sti(regs);
19171
19172 - if (regs->flags & X86_VM_MASK) {
19173 + if (v8086_mode(regs)) {
19174 handle_vm86_trap((struct kernel_vm86_regs *) regs,
19175 error_code, 1);
19176 preempt_conditional_cli(regs);
19177 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19178 * We already checked v86 mode above, so we can check for kernel mode
19179 * by just checking the CPL of CS.
19180 */
19181 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19182 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19183 tsk->thread.debugreg6 &= ~DR_STEP;
19184 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19185 regs->flags &= ~X86_EFLAGS_TF;
19186 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19187 return;
19188 conditional_sti(regs);
19189
19190 - if (!user_mode_vm(regs))
19191 + if (!user_mode(regs))
19192 {
19193 if (!fixup_exception(regs)) {
19194 task->thread.error_code = error_code;
19195 @@ -569,8 +598,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
19196 void __math_state_restore(struct task_struct *tsk)
19197 {
19198 /* We need a safe address that is cheap to find and that is already
19199 - in L1. We've just brought in "tsk->thread.has_fpu", so use that */
19200 -#define safe_address (tsk->thread.has_fpu)
19201 + in L1. */
19202 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
19203
19204 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
19205 is pending. Clear the x87 state here by setting it to fixed
19206 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19207 index b9242ba..50c5edd 100644
19208 --- a/arch/x86/kernel/verify_cpu.S
19209 +++ b/arch/x86/kernel/verify_cpu.S
19210 @@ -20,6 +20,7 @@
19211 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19212 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19213 * arch/x86/kernel/head_32.S: processor startup
19214 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19215 *
19216 * verify_cpu, returns the status of longmode and SSE in register %eax.
19217 * 0: Success 1: Failure
19218 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19219 index 863f875..4307295 100644
19220 --- a/arch/x86/kernel/vm86_32.c
19221 +++ b/arch/x86/kernel/vm86_32.c
19222 @@ -41,6 +41,7 @@
19223 #include <linux/ptrace.h>
19224 #include <linux/audit.h>
19225 #include <linux/stddef.h>
19226 +#include <linux/grsecurity.h>
19227
19228 #include <asm/uaccess.h>
19229 #include <asm/io.h>
19230 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19231 do_exit(SIGSEGV);
19232 }
19233
19234 - tss = &per_cpu(init_tss, get_cpu());
19235 + tss = init_tss + get_cpu();
19236 current->thread.sp0 = current->thread.saved_sp0;
19237 current->thread.sysenter_cs = __KERNEL_CS;
19238 load_sp0(tss, &current->thread);
19239 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19240 struct task_struct *tsk;
19241 int tmp, ret = -EPERM;
19242
19243 +#ifdef CONFIG_GRKERNSEC_VM86
19244 + if (!capable(CAP_SYS_RAWIO)) {
19245 + gr_handle_vm86();
19246 + goto out;
19247 + }
19248 +#endif
19249 +
19250 tsk = current;
19251 if (tsk->thread.saved_sp0)
19252 goto out;
19253 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19254 int tmp, ret;
19255 struct vm86plus_struct __user *v86;
19256
19257 +#ifdef CONFIG_GRKERNSEC_VM86
19258 + if (!capable(CAP_SYS_RAWIO)) {
19259 + gr_handle_vm86();
19260 + ret = -EPERM;
19261 + goto out;
19262 + }
19263 +#endif
19264 +
19265 tsk = current;
19266 switch (cmd) {
19267 case VM86_REQUEST_IRQ:
19268 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19269 tsk->thread.saved_fs = info->regs32->fs;
19270 tsk->thread.saved_gs = get_user_gs(info->regs32);
19271
19272 - tss = &per_cpu(init_tss, get_cpu());
19273 + tss = init_tss + get_cpu();
19274 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19275 if (cpu_has_sep)
19276 tsk->thread.sysenter_cs = 0;
19277 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19278 goto cannot_handle;
19279 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19280 goto cannot_handle;
19281 - intr_ptr = (unsigned long __user *) (i << 2);
19282 + intr_ptr = (__force unsigned long __user *) (i << 2);
19283 if (get_user(segoffs, intr_ptr))
19284 goto cannot_handle;
19285 if ((segoffs >> 16) == BIOSSEG)
19286 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19287 index 0f703f1..9e15f64 100644
19288 --- a/arch/x86/kernel/vmlinux.lds.S
19289 +++ b/arch/x86/kernel/vmlinux.lds.S
19290 @@ -26,6 +26,13 @@
19291 #include <asm/page_types.h>
19292 #include <asm/cache.h>
19293 #include <asm/boot.h>
19294 +#include <asm/segment.h>
19295 +
19296 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19297 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19298 +#else
19299 +#define __KERNEL_TEXT_OFFSET 0
19300 +#endif
19301
19302 #undef i386 /* in case the preprocessor is a 32bit one */
19303
19304 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19305
19306 PHDRS {
19307 text PT_LOAD FLAGS(5); /* R_E */
19308 +#ifdef CONFIG_X86_32
19309 + module PT_LOAD FLAGS(5); /* R_E */
19310 +#endif
19311 +#ifdef CONFIG_XEN
19312 + rodata PT_LOAD FLAGS(5); /* R_E */
19313 +#else
19314 + rodata PT_LOAD FLAGS(4); /* R__ */
19315 +#endif
19316 data PT_LOAD FLAGS(6); /* RW_ */
19317 -#ifdef CONFIG_X86_64
19318 + init.begin PT_LOAD FLAGS(6); /* RW_ */
19319 #ifdef CONFIG_SMP
19320 percpu PT_LOAD FLAGS(6); /* RW_ */
19321 #endif
19322 + text.init PT_LOAD FLAGS(5); /* R_E */
19323 + text.exit PT_LOAD FLAGS(5); /* R_E */
19324 init PT_LOAD FLAGS(7); /* RWE */
19325 -#endif
19326 note PT_NOTE FLAGS(0); /* ___ */
19327 }
19328
19329 SECTIONS
19330 {
19331 #ifdef CONFIG_X86_32
19332 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
19333 - phys_startup_32 = startup_32 - LOAD_OFFSET;
19334 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
19335 #else
19336 - . = __START_KERNEL;
19337 - phys_startup_64 = startup_64 - LOAD_OFFSET;
19338 + . = __START_KERNEL;
19339 #endif
19340
19341 /* Text and read-only data */
19342 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
19343 - _text = .;
19344 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19345 /* bootstrapping code */
19346 +#ifdef CONFIG_X86_32
19347 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19348 +#else
19349 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19350 +#endif
19351 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19352 + _text = .;
19353 HEAD_TEXT
19354 #ifdef CONFIG_X86_32
19355 . = ALIGN(PAGE_SIZE);
19356 @@ -108,13 +128,47 @@ SECTIONS
19357 IRQENTRY_TEXT
19358 *(.fixup)
19359 *(.gnu.warning)
19360 - /* End of text section */
19361 - _etext = .;
19362 } :text = 0x9090
19363
19364 - NOTES :text :note
19365 + . += __KERNEL_TEXT_OFFSET;
19366
19367 - EXCEPTION_TABLE(16) :text = 0x9090
19368 +#ifdef CONFIG_X86_32
19369 + . = ALIGN(PAGE_SIZE);
19370 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
19371 +
19372 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
19373 + MODULES_EXEC_VADDR = .;
19374 + BYTE(0)
19375 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
19376 + . = ALIGN(HPAGE_SIZE);
19377 + MODULES_EXEC_END = . - 1;
19378 +#endif
19379 +
19380 + } :module
19381 +#endif
19382 +
19383 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
19384 + /* End of text section */
19385 + _etext = . - __KERNEL_TEXT_OFFSET;
19386 + }
19387 +
19388 +#ifdef CONFIG_X86_32
19389 + . = ALIGN(PAGE_SIZE);
19390 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
19391 + *(.idt)
19392 + . = ALIGN(PAGE_SIZE);
19393 + *(.empty_zero_page)
19394 + *(.initial_pg_fixmap)
19395 + *(.initial_pg_pmd)
19396 + *(.initial_page_table)
19397 + *(.swapper_pg_dir)
19398 + } :rodata
19399 +#endif
19400 +
19401 + . = ALIGN(PAGE_SIZE);
19402 + NOTES :rodata :note
19403 +
19404 + EXCEPTION_TABLE(16) :rodata
19405
19406 #if defined(CONFIG_DEBUG_RODATA)
19407 /* .text should occupy whole number of pages */
19408 @@ -126,16 +180,20 @@ SECTIONS
19409
19410 /* Data */
19411 .data : AT(ADDR(.data) - LOAD_OFFSET) {
19412 +
19413 +#ifdef CONFIG_PAX_KERNEXEC
19414 + . = ALIGN(HPAGE_SIZE);
19415 +#else
19416 + . = ALIGN(PAGE_SIZE);
19417 +#endif
19418 +
19419 /* Start of data section */
19420 _sdata = .;
19421
19422 /* init_task */
19423 INIT_TASK_DATA(THREAD_SIZE)
19424
19425 -#ifdef CONFIG_X86_32
19426 - /* 32 bit has nosave before _edata */
19427 NOSAVE_DATA
19428 -#endif
19429
19430 PAGE_ALIGNED_DATA(PAGE_SIZE)
19431
19432 @@ -176,12 +234,19 @@ SECTIONS
19433 #endif /* CONFIG_X86_64 */
19434
19435 /* Init code and data - will be freed after init */
19436 - . = ALIGN(PAGE_SIZE);
19437 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
19438 + BYTE(0)
19439 +
19440 +#ifdef CONFIG_PAX_KERNEXEC
19441 + . = ALIGN(HPAGE_SIZE);
19442 +#else
19443 + . = ALIGN(PAGE_SIZE);
19444 +#endif
19445 +
19446 __init_begin = .; /* paired with __init_end */
19447 - }
19448 + } :init.begin
19449
19450 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
19451 +#ifdef CONFIG_SMP
19452 /*
19453 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
19454 * output PHDR, so the next output section - .init.text - should
19455 @@ -190,12 +255,27 @@ SECTIONS
19456 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
19457 #endif
19458
19459 - INIT_TEXT_SECTION(PAGE_SIZE)
19460 -#ifdef CONFIG_X86_64
19461 - :init
19462 -#endif
19463 + . = ALIGN(PAGE_SIZE);
19464 + init_begin = .;
19465 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
19466 + VMLINUX_SYMBOL(_sinittext) = .;
19467 + INIT_TEXT
19468 + VMLINUX_SYMBOL(_einittext) = .;
19469 + . = ALIGN(PAGE_SIZE);
19470 + } :text.init
19471
19472 - INIT_DATA_SECTION(16)
19473 + /*
19474 + * .exit.text is discard at runtime, not link time, to deal with
19475 + * references from .altinstructions and .eh_frame
19476 + */
19477 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19478 + EXIT_TEXT
19479 + . = ALIGN(16);
19480 + } :text.exit
19481 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
19482 +
19483 + . = ALIGN(PAGE_SIZE);
19484 + INIT_DATA_SECTION(16) :init
19485
19486 /*
19487 * Code and data for a variety of lowlevel trampolines, to be
19488 @@ -269,19 +349,12 @@ SECTIONS
19489 }
19490
19491 . = ALIGN(8);
19492 - /*
19493 - * .exit.text is discard at runtime, not link time, to deal with
19494 - * references from .altinstructions and .eh_frame
19495 - */
19496 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
19497 - EXIT_TEXT
19498 - }
19499
19500 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
19501 EXIT_DATA
19502 }
19503
19504 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
19505 +#ifndef CONFIG_SMP
19506 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
19507 #endif
19508
19509 @@ -300,16 +373,10 @@ SECTIONS
19510 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
19511 __smp_locks = .;
19512 *(.smp_locks)
19513 - . = ALIGN(PAGE_SIZE);
19514 __smp_locks_end = .;
19515 + . = ALIGN(PAGE_SIZE);
19516 }
19517
19518 -#ifdef CONFIG_X86_64
19519 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
19520 - NOSAVE_DATA
19521 - }
19522 -#endif
19523 -
19524 /* BSS */
19525 . = ALIGN(PAGE_SIZE);
19526 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
19527 @@ -325,6 +392,7 @@ SECTIONS
19528 __brk_base = .;
19529 . += 64 * 1024; /* 64k alignment slop space */
19530 *(.brk_reservation) /* areas brk users have reserved */
19531 + . = ALIGN(HPAGE_SIZE);
19532 __brk_limit = .;
19533 }
19534
19535 @@ -351,13 +419,12 @@ SECTIONS
19536 * for the boot processor.
19537 */
19538 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
19539 -INIT_PER_CPU(gdt_page);
19540 INIT_PER_CPU(irq_stack_union);
19541
19542 /*
19543 * Build-time check on the image size:
19544 */
19545 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
19546 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
19547 "kernel image bigger than KERNEL_IMAGE_SIZE");
19548
19549 #ifdef CONFIG_SMP
19550 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
19551 index e4d4a22..47ee71f 100644
19552 --- a/arch/x86/kernel/vsyscall_64.c
19553 +++ b/arch/x86/kernel/vsyscall_64.c
19554 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
19555 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
19556 };
19557
19558 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
19559 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
19560
19561 static int __init vsyscall_setup(char *str)
19562 {
19563 if (str) {
19564 if (!strcmp("emulate", str))
19565 vsyscall_mode = EMULATE;
19566 - else if (!strcmp("native", str))
19567 - vsyscall_mode = NATIVE;
19568 else if (!strcmp("none", str))
19569 vsyscall_mode = NONE;
19570 else
19571 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19572
19573 tsk = current;
19574 if (seccomp_mode(&tsk->seccomp))
19575 - do_exit(SIGKILL);
19576 + do_group_exit(SIGKILL);
19577
19578 switch (vsyscall_nr) {
19579 case 0:
19580 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19581 return true;
19582
19583 sigsegv:
19584 - force_sig(SIGSEGV, current);
19585 - return true;
19586 + do_group_exit(SIGKILL);
19587 }
19588
19589 /*
19590 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
19591 extern char __vvar_page;
19592 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
19593
19594 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
19595 - vsyscall_mode == NATIVE
19596 - ? PAGE_KERNEL_VSYSCALL
19597 - : PAGE_KERNEL_VVAR);
19598 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
19599 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
19600 (unsigned long)VSYSCALL_START);
19601
19602 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
19603 index 9796c2f..f686fbf 100644
19604 --- a/arch/x86/kernel/x8664_ksyms_64.c
19605 +++ b/arch/x86/kernel/x8664_ksyms_64.c
19606 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
19607 EXPORT_SYMBOL(copy_user_generic_string);
19608 EXPORT_SYMBOL(copy_user_generic_unrolled);
19609 EXPORT_SYMBOL(__copy_user_nocache);
19610 -EXPORT_SYMBOL(_copy_from_user);
19611 -EXPORT_SYMBOL(_copy_to_user);
19612
19613 EXPORT_SYMBOL(copy_page);
19614 EXPORT_SYMBOL(clear_page);
19615 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
19616 index 7110911..e8cdee5 100644
19617 --- a/arch/x86/kernel/xsave.c
19618 +++ b/arch/x86/kernel/xsave.c
19619 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
19620 fx_sw_user->xstate_size > fx_sw_user->extended_size)
19621 return -EINVAL;
19622
19623 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
19624 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
19625 fx_sw_user->extended_size -
19626 FP_XSTATE_MAGIC2_SIZE));
19627 if (err)
19628 @@ -266,7 +266,7 @@ fx_only:
19629 * the other extended state.
19630 */
19631 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
19632 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
19633 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
19634 }
19635
19636 /*
19637 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
19638 if (use_xsave())
19639 err = restore_user_xstate(buf);
19640 else
19641 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
19642 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
19643 buf);
19644 if (unlikely(err)) {
19645 /*
19646 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
19647 index f1e3be1..588efc8 100644
19648 --- a/arch/x86/kvm/emulate.c
19649 +++ b/arch/x86/kvm/emulate.c
19650 @@ -249,6 +249,7 @@ struct gprefix {
19651
19652 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
19653 do { \
19654 + unsigned long _tmp; \
19655 __asm__ __volatile__ ( \
19656 _PRE_EFLAGS("0", "4", "2") \
19657 _op _suffix " %"_x"3,%1; " \
19658 @@ -263,8 +264,6 @@ struct gprefix {
19659 /* Raw emulation: instruction has two explicit operands. */
19660 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
19661 do { \
19662 - unsigned long _tmp; \
19663 - \
19664 switch ((ctxt)->dst.bytes) { \
19665 case 2: \
19666 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
19667 @@ -280,7 +279,6 @@ struct gprefix {
19668
19669 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
19670 do { \
19671 - unsigned long _tmp; \
19672 switch ((ctxt)->dst.bytes) { \
19673 case 1: \
19674 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
19675 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
19676 index 54abb40..a192606 100644
19677 --- a/arch/x86/kvm/lapic.c
19678 +++ b/arch/x86/kvm/lapic.c
19679 @@ -53,7 +53,7 @@
19680 #define APIC_BUS_CYCLE_NS 1
19681
19682 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
19683 -#define apic_debug(fmt, arg...)
19684 +#define apic_debug(fmt, arg...) do {} while (0)
19685
19686 #define APIC_LVT_NUM 6
19687 /* 14 is the version for Xeon and Pentium 8.4.8*/
19688 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
19689 index f1b36cf..af8a124 100644
19690 --- a/arch/x86/kvm/mmu.c
19691 +++ b/arch/x86/kvm/mmu.c
19692 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
19693
19694 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
19695
19696 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
19697 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
19698
19699 /*
19700 * Assume that the pte write on a page table of the same type
19701 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
19702 }
19703
19704 spin_lock(&vcpu->kvm->mmu_lock);
19705 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
19706 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
19707 gentry = 0;
19708 kvm_mmu_free_some_pages(vcpu);
19709 ++vcpu->kvm->stat.mmu_pte_write;
19710 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
19711 index 9299410..ade2f9b 100644
19712 --- a/arch/x86/kvm/paging_tmpl.h
19713 +++ b/arch/x86/kvm/paging_tmpl.h
19714 @@ -197,7 +197,7 @@ retry_walk:
19715 if (unlikely(kvm_is_error_hva(host_addr)))
19716 goto error;
19717
19718 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
19719 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
19720 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
19721 goto error;
19722
19723 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
19724 if (need_flush)
19725 kvm_flush_remote_tlbs(vcpu->kvm);
19726
19727 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
19728 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
19729
19730 spin_unlock(&vcpu->kvm->mmu_lock);
19731
19732 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
19733 index 94a4672..5c6b853 100644
19734 --- a/arch/x86/kvm/svm.c
19735 +++ b/arch/x86/kvm/svm.c
19736 @@ -3405,7 +3405,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
19737 int cpu = raw_smp_processor_id();
19738
19739 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
19740 +
19741 + pax_open_kernel();
19742 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
19743 + pax_close_kernel();
19744 +
19745 load_TR_desc();
19746 }
19747
19748 @@ -3783,6 +3787,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
19749 #endif
19750 #endif
19751
19752 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19753 + __set_fs(current_thread_info()->addr_limit);
19754 +#endif
19755 +
19756 reload_tss(vcpu);
19757
19758 local_irq_disable();
19759 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
19760 index 4ea7678..b3a7084 100644
19761 --- a/arch/x86/kvm/vmx.c
19762 +++ b/arch/x86/kvm/vmx.c
19763 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
19764 struct desc_struct *descs;
19765
19766 descs = (void *)gdt->address;
19767 +
19768 + pax_open_kernel();
19769 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
19770 + pax_close_kernel();
19771 +
19772 load_TR_desc();
19773 }
19774
19775 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
19776 if (!cpu_has_vmx_flexpriority())
19777 flexpriority_enabled = 0;
19778
19779 - if (!cpu_has_vmx_tpr_shadow())
19780 - kvm_x86_ops->update_cr8_intercept = NULL;
19781 + if (!cpu_has_vmx_tpr_shadow()) {
19782 + pax_open_kernel();
19783 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
19784 + pax_close_kernel();
19785 + }
19786
19787 if (enable_ept && !cpu_has_vmx_ept_2m_page())
19788 kvm_disable_largepages();
19789 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
19790 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
19791
19792 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
19793 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
19794 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
19795
19796 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
19797 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
19798 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19799 "jmp .Lkvm_vmx_return \n\t"
19800 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
19801 ".Lkvm_vmx_return: "
19802 +
19803 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19804 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
19805 + ".Lkvm_vmx_return2: "
19806 +#endif
19807 +
19808 /* Save guest registers, load host registers, keep flags */
19809 "mov %0, %c[wordsize](%%"R"sp) \n\t"
19810 "pop %0 \n\t"
19811 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19812 #endif
19813 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
19814 [wordsize]"i"(sizeof(ulong))
19815 +
19816 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19817 + ,[cs]"i"(__KERNEL_CS)
19818 +#endif
19819 +
19820 : "cc", "memory"
19821 , R"ax", R"bx", R"di", R"si"
19822 #ifdef CONFIG_X86_64
19823 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19824 }
19825 }
19826
19827 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
19828 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
19829 +
19830 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19831 + loadsegment(fs, __KERNEL_PERCPU);
19832 +#endif
19833 +
19834 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19835 + __set_fs(current_thread_info()->addr_limit);
19836 +#endif
19837 +
19838 vmx->loaded_vmcs->launched = 1;
19839
19840 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
19841 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
19842 index 4c938da..4ddef65 100644
19843 --- a/arch/x86/kvm/x86.c
19844 +++ b/arch/x86/kvm/x86.c
19845 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
19846 {
19847 struct kvm *kvm = vcpu->kvm;
19848 int lm = is_long_mode(vcpu);
19849 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19850 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19851 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19852 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19853 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
19854 : kvm->arch.xen_hvm_config.blob_size_32;
19855 u32 page_num = data & ~PAGE_MASK;
19856 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
19857 if (n < msr_list.nmsrs)
19858 goto out;
19859 r = -EFAULT;
19860 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
19861 + goto out;
19862 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
19863 num_msrs_to_save * sizeof(u32)))
19864 goto out;
19865 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19866 struct kvm_cpuid2 *cpuid,
19867 struct kvm_cpuid_entry2 __user *entries)
19868 {
19869 - int r;
19870 + int r, i;
19871
19872 r = -E2BIG;
19873 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
19874 goto out;
19875 r = -EFAULT;
19876 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
19877 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19878 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19879 goto out;
19880 + for (i = 0; i < cpuid->nent; ++i) {
19881 + struct kvm_cpuid_entry2 cpuid_entry;
19882 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
19883 + goto out;
19884 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
19885 + }
19886 vcpu->arch.cpuid_nent = cpuid->nent;
19887 kvm_apic_set_version(vcpu);
19888 kvm_x86_ops->cpuid_update(vcpu);
19889 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
19890 struct kvm_cpuid2 *cpuid,
19891 struct kvm_cpuid_entry2 __user *entries)
19892 {
19893 - int r;
19894 + int r, i;
19895
19896 r = -E2BIG;
19897 if (cpuid->nent < vcpu->arch.cpuid_nent)
19898 goto out;
19899 r = -EFAULT;
19900 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
19901 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19902 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19903 goto out;
19904 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
19905 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
19906 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
19907 + goto out;
19908 + }
19909 return 0;
19910
19911 out:
19912 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
19913 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
19914 struct kvm_interrupt *irq)
19915 {
19916 - if (irq->irq < 0 || irq->irq >= 256)
19917 + if (irq->irq >= 256)
19918 return -EINVAL;
19919 if (irqchip_in_kernel(vcpu->kvm))
19920 return -ENXIO;
19921 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
19922 kvm_mmu_set_mmio_spte_mask(mask);
19923 }
19924
19925 -int kvm_arch_init(void *opaque)
19926 +int kvm_arch_init(const void *opaque)
19927 {
19928 int r;
19929 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
19930 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
19931 index cf4603b..7cdde38 100644
19932 --- a/arch/x86/lguest/boot.c
19933 +++ b/arch/x86/lguest/boot.c
19934 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
19935 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
19936 * Launcher to reboot us.
19937 */
19938 -static void lguest_restart(char *reason)
19939 +static __noreturn void lguest_restart(char *reason)
19940 {
19941 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
19942 + BUG();
19943 }
19944
19945 /*G:050
19946 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
19947 index 042f682..c92afb6 100644
19948 --- a/arch/x86/lib/atomic64_32.c
19949 +++ b/arch/x86/lib/atomic64_32.c
19950 @@ -8,18 +8,30 @@
19951
19952 long long atomic64_read_cx8(long long, const atomic64_t *v);
19953 EXPORT_SYMBOL(atomic64_read_cx8);
19954 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19955 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
19956 long long atomic64_set_cx8(long long, const atomic64_t *v);
19957 EXPORT_SYMBOL(atomic64_set_cx8);
19958 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19959 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
19960 long long atomic64_xchg_cx8(long long, unsigned high);
19961 EXPORT_SYMBOL(atomic64_xchg_cx8);
19962 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
19963 EXPORT_SYMBOL(atomic64_add_return_cx8);
19964 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19965 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
19966 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
19967 EXPORT_SYMBOL(atomic64_sub_return_cx8);
19968 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19969 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
19970 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
19971 EXPORT_SYMBOL(atomic64_inc_return_cx8);
19972 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19973 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
19974 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
19975 EXPORT_SYMBOL(atomic64_dec_return_cx8);
19976 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19977 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
19978 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
19979 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
19980 int atomic64_inc_not_zero_cx8(atomic64_t *v);
19981 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
19982 #ifndef CONFIG_X86_CMPXCHG64
19983 long long atomic64_read_386(long long, const atomic64_t *v);
19984 EXPORT_SYMBOL(atomic64_read_386);
19985 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
19986 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
19987 long long atomic64_set_386(long long, const atomic64_t *v);
19988 EXPORT_SYMBOL(atomic64_set_386);
19989 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
19990 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
19991 long long atomic64_xchg_386(long long, unsigned high);
19992 EXPORT_SYMBOL(atomic64_xchg_386);
19993 long long atomic64_add_return_386(long long a, atomic64_t *v);
19994 EXPORT_SYMBOL(atomic64_add_return_386);
19995 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
19996 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
19997 long long atomic64_sub_return_386(long long a, atomic64_t *v);
19998 EXPORT_SYMBOL(atomic64_sub_return_386);
19999 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20000 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
20001 long long atomic64_inc_return_386(long long a, atomic64_t *v);
20002 EXPORT_SYMBOL(atomic64_inc_return_386);
20003 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20004 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
20005 long long atomic64_dec_return_386(long long a, atomic64_t *v);
20006 EXPORT_SYMBOL(atomic64_dec_return_386);
20007 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20008 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
20009 long long atomic64_add_386(long long a, atomic64_t *v);
20010 EXPORT_SYMBOL(atomic64_add_386);
20011 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
20012 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
20013 long long atomic64_sub_386(long long a, atomic64_t *v);
20014 EXPORT_SYMBOL(atomic64_sub_386);
20015 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
20016 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
20017 long long atomic64_inc_386(long long a, atomic64_t *v);
20018 EXPORT_SYMBOL(atomic64_inc_386);
20019 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
20020 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
20021 long long atomic64_dec_386(long long a, atomic64_t *v);
20022 EXPORT_SYMBOL(atomic64_dec_386);
20023 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
20024 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
20025 long long atomic64_dec_if_positive_386(atomic64_t *v);
20026 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20027 int atomic64_inc_not_zero_386(atomic64_t *v);
20028 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20029 index e8e7e0d..56fd1b0 100644
20030 --- a/arch/x86/lib/atomic64_386_32.S
20031 +++ b/arch/x86/lib/atomic64_386_32.S
20032 @@ -48,6 +48,10 @@ BEGIN(read)
20033 movl (v), %eax
20034 movl 4(v), %edx
20035 RET_ENDP
20036 +BEGIN(read_unchecked)
20037 + movl (v), %eax
20038 + movl 4(v), %edx
20039 +RET_ENDP
20040 #undef v
20041
20042 #define v %esi
20043 @@ -55,6 +59,10 @@ BEGIN(set)
20044 movl %ebx, (v)
20045 movl %ecx, 4(v)
20046 RET_ENDP
20047 +BEGIN(set_unchecked)
20048 + movl %ebx, (v)
20049 + movl %ecx, 4(v)
20050 +RET_ENDP
20051 #undef v
20052
20053 #define v %esi
20054 @@ -70,6 +78,20 @@ RET_ENDP
20055 BEGIN(add)
20056 addl %eax, (v)
20057 adcl %edx, 4(v)
20058 +
20059 +#ifdef CONFIG_PAX_REFCOUNT
20060 + jno 0f
20061 + subl %eax, (v)
20062 + sbbl %edx, 4(v)
20063 + int $4
20064 +0:
20065 + _ASM_EXTABLE(0b, 0b)
20066 +#endif
20067 +
20068 +RET_ENDP
20069 +BEGIN(add_unchecked)
20070 + addl %eax, (v)
20071 + adcl %edx, 4(v)
20072 RET_ENDP
20073 #undef v
20074
20075 @@ -77,6 +99,24 @@ RET_ENDP
20076 BEGIN(add_return)
20077 addl (v), %eax
20078 adcl 4(v), %edx
20079 +
20080 +#ifdef CONFIG_PAX_REFCOUNT
20081 + into
20082 +1234:
20083 + _ASM_EXTABLE(1234b, 2f)
20084 +#endif
20085 +
20086 + movl %eax, (v)
20087 + movl %edx, 4(v)
20088 +
20089 +#ifdef CONFIG_PAX_REFCOUNT
20090 +2:
20091 +#endif
20092 +
20093 +RET_ENDP
20094 +BEGIN(add_return_unchecked)
20095 + addl (v), %eax
20096 + adcl 4(v), %edx
20097 movl %eax, (v)
20098 movl %edx, 4(v)
20099 RET_ENDP
20100 @@ -86,6 +126,20 @@ RET_ENDP
20101 BEGIN(sub)
20102 subl %eax, (v)
20103 sbbl %edx, 4(v)
20104 +
20105 +#ifdef CONFIG_PAX_REFCOUNT
20106 + jno 0f
20107 + addl %eax, (v)
20108 + adcl %edx, 4(v)
20109 + int $4
20110 +0:
20111 + _ASM_EXTABLE(0b, 0b)
20112 +#endif
20113 +
20114 +RET_ENDP
20115 +BEGIN(sub_unchecked)
20116 + subl %eax, (v)
20117 + sbbl %edx, 4(v)
20118 RET_ENDP
20119 #undef v
20120
20121 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20122 sbbl $0, %edx
20123 addl (v), %eax
20124 adcl 4(v), %edx
20125 +
20126 +#ifdef CONFIG_PAX_REFCOUNT
20127 + into
20128 +1234:
20129 + _ASM_EXTABLE(1234b, 2f)
20130 +#endif
20131 +
20132 + movl %eax, (v)
20133 + movl %edx, 4(v)
20134 +
20135 +#ifdef CONFIG_PAX_REFCOUNT
20136 +2:
20137 +#endif
20138 +
20139 +RET_ENDP
20140 +BEGIN(sub_return_unchecked)
20141 + negl %edx
20142 + negl %eax
20143 + sbbl $0, %edx
20144 + addl (v), %eax
20145 + adcl 4(v), %edx
20146 movl %eax, (v)
20147 movl %edx, 4(v)
20148 RET_ENDP
20149 @@ -105,6 +180,20 @@ RET_ENDP
20150 BEGIN(inc)
20151 addl $1, (v)
20152 adcl $0, 4(v)
20153 +
20154 +#ifdef CONFIG_PAX_REFCOUNT
20155 + jno 0f
20156 + subl $1, (v)
20157 + sbbl $0, 4(v)
20158 + int $4
20159 +0:
20160 + _ASM_EXTABLE(0b, 0b)
20161 +#endif
20162 +
20163 +RET_ENDP
20164 +BEGIN(inc_unchecked)
20165 + addl $1, (v)
20166 + adcl $0, 4(v)
20167 RET_ENDP
20168 #undef v
20169
20170 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20171 movl 4(v), %edx
20172 addl $1, %eax
20173 adcl $0, %edx
20174 +
20175 +#ifdef CONFIG_PAX_REFCOUNT
20176 + into
20177 +1234:
20178 + _ASM_EXTABLE(1234b, 2f)
20179 +#endif
20180 +
20181 + movl %eax, (v)
20182 + movl %edx, 4(v)
20183 +
20184 +#ifdef CONFIG_PAX_REFCOUNT
20185 +2:
20186 +#endif
20187 +
20188 +RET_ENDP
20189 +BEGIN(inc_return_unchecked)
20190 + movl (v), %eax
20191 + movl 4(v), %edx
20192 + addl $1, %eax
20193 + adcl $0, %edx
20194 movl %eax, (v)
20195 movl %edx, 4(v)
20196 RET_ENDP
20197 @@ -123,6 +232,20 @@ RET_ENDP
20198 BEGIN(dec)
20199 subl $1, (v)
20200 sbbl $0, 4(v)
20201 +
20202 +#ifdef CONFIG_PAX_REFCOUNT
20203 + jno 0f
20204 + addl $1, (v)
20205 + adcl $0, 4(v)
20206 + int $4
20207 +0:
20208 + _ASM_EXTABLE(0b, 0b)
20209 +#endif
20210 +
20211 +RET_ENDP
20212 +BEGIN(dec_unchecked)
20213 + subl $1, (v)
20214 + sbbl $0, 4(v)
20215 RET_ENDP
20216 #undef v
20217
20218 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20219 movl 4(v), %edx
20220 subl $1, %eax
20221 sbbl $0, %edx
20222 +
20223 +#ifdef CONFIG_PAX_REFCOUNT
20224 + into
20225 +1234:
20226 + _ASM_EXTABLE(1234b, 2f)
20227 +#endif
20228 +
20229 + movl %eax, (v)
20230 + movl %edx, 4(v)
20231 +
20232 +#ifdef CONFIG_PAX_REFCOUNT
20233 +2:
20234 +#endif
20235 +
20236 +RET_ENDP
20237 +BEGIN(dec_return_unchecked)
20238 + movl (v), %eax
20239 + movl 4(v), %edx
20240 + subl $1, %eax
20241 + sbbl $0, %edx
20242 movl %eax, (v)
20243 movl %edx, 4(v)
20244 RET_ENDP
20245 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20246 adcl %edx, %edi
20247 addl (v), %eax
20248 adcl 4(v), %edx
20249 +
20250 +#ifdef CONFIG_PAX_REFCOUNT
20251 + into
20252 +1234:
20253 + _ASM_EXTABLE(1234b, 2f)
20254 +#endif
20255 +
20256 cmpl %eax, %esi
20257 je 3f
20258 1:
20259 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20260 1:
20261 addl $1, %eax
20262 adcl $0, %edx
20263 +
20264 +#ifdef CONFIG_PAX_REFCOUNT
20265 + into
20266 +1234:
20267 + _ASM_EXTABLE(1234b, 2f)
20268 +#endif
20269 +
20270 movl %eax, (v)
20271 movl %edx, 4(v)
20272 movl $1, %eax
20273 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20274 movl 4(v), %edx
20275 subl $1, %eax
20276 sbbl $0, %edx
20277 +
20278 +#ifdef CONFIG_PAX_REFCOUNT
20279 + into
20280 +1234:
20281 + _ASM_EXTABLE(1234b, 1f)
20282 +#endif
20283 +
20284 js 1f
20285 movl %eax, (v)
20286 movl %edx, 4(v)
20287 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20288 index 391a083..d658e9f 100644
20289 --- a/arch/x86/lib/atomic64_cx8_32.S
20290 +++ b/arch/x86/lib/atomic64_cx8_32.S
20291 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20292 CFI_STARTPROC
20293
20294 read64 %ecx
20295 + pax_force_retaddr
20296 ret
20297 CFI_ENDPROC
20298 ENDPROC(atomic64_read_cx8)
20299
20300 +ENTRY(atomic64_read_unchecked_cx8)
20301 + CFI_STARTPROC
20302 +
20303 + read64 %ecx
20304 + pax_force_retaddr
20305 + ret
20306 + CFI_ENDPROC
20307 +ENDPROC(atomic64_read_unchecked_cx8)
20308 +
20309 ENTRY(atomic64_set_cx8)
20310 CFI_STARTPROC
20311
20312 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20313 cmpxchg8b (%esi)
20314 jne 1b
20315
20316 + pax_force_retaddr
20317 ret
20318 CFI_ENDPROC
20319 ENDPROC(atomic64_set_cx8)
20320
20321 +ENTRY(atomic64_set_unchecked_cx8)
20322 + CFI_STARTPROC
20323 +
20324 +1:
20325 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20326 + * are atomic on 586 and newer */
20327 + cmpxchg8b (%esi)
20328 + jne 1b
20329 +
20330 + pax_force_retaddr
20331 + ret
20332 + CFI_ENDPROC
20333 +ENDPROC(atomic64_set_unchecked_cx8)
20334 +
20335 ENTRY(atomic64_xchg_cx8)
20336 CFI_STARTPROC
20337
20338 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
20339 cmpxchg8b (%esi)
20340 jne 1b
20341
20342 + pax_force_retaddr
20343 ret
20344 CFI_ENDPROC
20345 ENDPROC(atomic64_xchg_cx8)
20346
20347 -.macro addsub_return func ins insc
20348 -ENTRY(atomic64_\func\()_return_cx8)
20349 +.macro addsub_return func ins insc unchecked=""
20350 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20351 CFI_STARTPROC
20352 SAVE ebp
20353 SAVE ebx
20354 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20355 movl %edx, %ecx
20356 \ins\()l %esi, %ebx
20357 \insc\()l %edi, %ecx
20358 +
20359 +.ifb \unchecked
20360 +#ifdef CONFIG_PAX_REFCOUNT
20361 + into
20362 +2:
20363 + _ASM_EXTABLE(2b, 3f)
20364 +#endif
20365 +.endif
20366 +
20367 LOCK_PREFIX
20368 cmpxchg8b (%ebp)
20369 jne 1b
20370 -
20371 -10:
20372 movl %ebx, %eax
20373 movl %ecx, %edx
20374 +
20375 +.ifb \unchecked
20376 +#ifdef CONFIG_PAX_REFCOUNT
20377 +3:
20378 +#endif
20379 +.endif
20380 +
20381 RESTORE edi
20382 RESTORE esi
20383 RESTORE ebx
20384 RESTORE ebp
20385 + pax_force_retaddr
20386 ret
20387 CFI_ENDPROC
20388 -ENDPROC(atomic64_\func\()_return_cx8)
20389 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20390 .endm
20391
20392 addsub_return add add adc
20393 addsub_return sub sub sbb
20394 +addsub_return add add adc _unchecked
20395 +addsub_return sub sub sbb _unchecked
20396
20397 -.macro incdec_return func ins insc
20398 -ENTRY(atomic64_\func\()_return_cx8)
20399 +.macro incdec_return func ins insc unchecked
20400 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20401 CFI_STARTPROC
20402 SAVE ebx
20403
20404 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20405 movl %edx, %ecx
20406 \ins\()l $1, %ebx
20407 \insc\()l $0, %ecx
20408 +
20409 +.ifb \unchecked
20410 +#ifdef CONFIG_PAX_REFCOUNT
20411 + into
20412 +2:
20413 + _ASM_EXTABLE(2b, 3f)
20414 +#endif
20415 +.endif
20416 +
20417 LOCK_PREFIX
20418 cmpxchg8b (%esi)
20419 jne 1b
20420
20421 -10:
20422 movl %ebx, %eax
20423 movl %ecx, %edx
20424 +
20425 +.ifb \unchecked
20426 +#ifdef CONFIG_PAX_REFCOUNT
20427 +3:
20428 +#endif
20429 +.endif
20430 +
20431 RESTORE ebx
20432 + pax_force_retaddr
20433 ret
20434 CFI_ENDPROC
20435 -ENDPROC(atomic64_\func\()_return_cx8)
20436 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20437 .endm
20438
20439 incdec_return inc add adc
20440 incdec_return dec sub sbb
20441 +incdec_return inc add adc _unchecked
20442 +incdec_return dec sub sbb _unchecked
20443
20444 ENTRY(atomic64_dec_if_positive_cx8)
20445 CFI_STARTPROC
20446 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
20447 movl %edx, %ecx
20448 subl $1, %ebx
20449 sbb $0, %ecx
20450 +
20451 +#ifdef CONFIG_PAX_REFCOUNT
20452 + into
20453 +1234:
20454 + _ASM_EXTABLE(1234b, 2f)
20455 +#endif
20456 +
20457 js 2f
20458 LOCK_PREFIX
20459 cmpxchg8b (%esi)
20460 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
20461 movl %ebx, %eax
20462 movl %ecx, %edx
20463 RESTORE ebx
20464 + pax_force_retaddr
20465 ret
20466 CFI_ENDPROC
20467 ENDPROC(atomic64_dec_if_positive_cx8)
20468 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
20469 movl %edx, %ecx
20470 addl %esi, %ebx
20471 adcl %edi, %ecx
20472 +
20473 +#ifdef CONFIG_PAX_REFCOUNT
20474 + into
20475 +1234:
20476 + _ASM_EXTABLE(1234b, 3f)
20477 +#endif
20478 +
20479 LOCK_PREFIX
20480 cmpxchg8b (%ebp)
20481 jne 1b
20482 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
20483 CFI_ADJUST_CFA_OFFSET -8
20484 RESTORE ebx
20485 RESTORE ebp
20486 + pax_force_retaddr
20487 ret
20488 4:
20489 cmpl %edx, 4(%esp)
20490 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
20491 movl %edx, %ecx
20492 addl $1, %ebx
20493 adcl $0, %ecx
20494 +
20495 +#ifdef CONFIG_PAX_REFCOUNT
20496 + into
20497 +1234:
20498 + _ASM_EXTABLE(1234b, 3f)
20499 +#endif
20500 +
20501 LOCK_PREFIX
20502 cmpxchg8b (%esi)
20503 jne 1b
20504 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
20505 movl $1, %eax
20506 3:
20507 RESTORE ebx
20508 + pax_force_retaddr
20509 ret
20510 4:
20511 testl %edx, %edx
20512 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
20513 index 78d16a5..fbcf666 100644
20514 --- a/arch/x86/lib/checksum_32.S
20515 +++ b/arch/x86/lib/checksum_32.S
20516 @@ -28,7 +28,8 @@
20517 #include <linux/linkage.h>
20518 #include <asm/dwarf2.h>
20519 #include <asm/errno.h>
20520 -
20521 +#include <asm/segment.h>
20522 +
20523 /*
20524 * computes a partial checksum, e.g. for TCP/UDP fragments
20525 */
20526 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
20527
20528 #define ARGBASE 16
20529 #define FP 12
20530 -
20531 -ENTRY(csum_partial_copy_generic)
20532 +
20533 +ENTRY(csum_partial_copy_generic_to_user)
20534 CFI_STARTPROC
20535 +
20536 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20537 + pushl_cfi %gs
20538 + popl_cfi %es
20539 + jmp csum_partial_copy_generic
20540 +#endif
20541 +
20542 +ENTRY(csum_partial_copy_generic_from_user)
20543 +
20544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20545 + pushl_cfi %gs
20546 + popl_cfi %ds
20547 +#endif
20548 +
20549 +ENTRY(csum_partial_copy_generic)
20550 subl $4,%esp
20551 CFI_ADJUST_CFA_OFFSET 4
20552 pushl_cfi %edi
20553 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
20554 jmp 4f
20555 SRC(1: movw (%esi), %bx )
20556 addl $2, %esi
20557 -DST( movw %bx, (%edi) )
20558 +DST( movw %bx, %es:(%edi) )
20559 addl $2, %edi
20560 addw %bx, %ax
20561 adcl $0, %eax
20562 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
20563 SRC(1: movl (%esi), %ebx )
20564 SRC( movl 4(%esi), %edx )
20565 adcl %ebx, %eax
20566 -DST( movl %ebx, (%edi) )
20567 +DST( movl %ebx, %es:(%edi) )
20568 adcl %edx, %eax
20569 -DST( movl %edx, 4(%edi) )
20570 +DST( movl %edx, %es:4(%edi) )
20571
20572 SRC( movl 8(%esi), %ebx )
20573 SRC( movl 12(%esi), %edx )
20574 adcl %ebx, %eax
20575 -DST( movl %ebx, 8(%edi) )
20576 +DST( movl %ebx, %es:8(%edi) )
20577 adcl %edx, %eax
20578 -DST( movl %edx, 12(%edi) )
20579 +DST( movl %edx, %es:12(%edi) )
20580
20581 SRC( movl 16(%esi), %ebx )
20582 SRC( movl 20(%esi), %edx )
20583 adcl %ebx, %eax
20584 -DST( movl %ebx, 16(%edi) )
20585 +DST( movl %ebx, %es:16(%edi) )
20586 adcl %edx, %eax
20587 -DST( movl %edx, 20(%edi) )
20588 +DST( movl %edx, %es:20(%edi) )
20589
20590 SRC( movl 24(%esi), %ebx )
20591 SRC( movl 28(%esi), %edx )
20592 adcl %ebx, %eax
20593 -DST( movl %ebx, 24(%edi) )
20594 +DST( movl %ebx, %es:24(%edi) )
20595 adcl %edx, %eax
20596 -DST( movl %edx, 28(%edi) )
20597 +DST( movl %edx, %es:28(%edi) )
20598
20599 lea 32(%esi), %esi
20600 lea 32(%edi), %edi
20601 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
20602 shrl $2, %edx # This clears CF
20603 SRC(3: movl (%esi), %ebx )
20604 adcl %ebx, %eax
20605 -DST( movl %ebx, (%edi) )
20606 +DST( movl %ebx, %es:(%edi) )
20607 lea 4(%esi), %esi
20608 lea 4(%edi), %edi
20609 dec %edx
20610 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
20611 jb 5f
20612 SRC( movw (%esi), %cx )
20613 leal 2(%esi), %esi
20614 -DST( movw %cx, (%edi) )
20615 +DST( movw %cx, %es:(%edi) )
20616 leal 2(%edi), %edi
20617 je 6f
20618 shll $16,%ecx
20619 SRC(5: movb (%esi), %cl )
20620 -DST( movb %cl, (%edi) )
20621 +DST( movb %cl, %es:(%edi) )
20622 6: addl %ecx, %eax
20623 adcl $0, %eax
20624 7:
20625 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
20626
20627 6001:
20628 movl ARGBASE+20(%esp), %ebx # src_err_ptr
20629 - movl $-EFAULT, (%ebx)
20630 + movl $-EFAULT, %ss:(%ebx)
20631
20632 # zero the complete destination - computing the rest
20633 # is too much work
20634 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
20635
20636 6002:
20637 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20638 - movl $-EFAULT,(%ebx)
20639 + movl $-EFAULT,%ss:(%ebx)
20640 jmp 5000b
20641
20642 .previous
20643
20644 + pushl_cfi %ss
20645 + popl_cfi %ds
20646 + pushl_cfi %ss
20647 + popl_cfi %es
20648 popl_cfi %ebx
20649 CFI_RESTORE ebx
20650 popl_cfi %esi
20651 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
20652 popl_cfi %ecx # equivalent to addl $4,%esp
20653 ret
20654 CFI_ENDPROC
20655 -ENDPROC(csum_partial_copy_generic)
20656 +ENDPROC(csum_partial_copy_generic_to_user)
20657
20658 #else
20659
20660 /* Version for PentiumII/PPro */
20661
20662 #define ROUND1(x) \
20663 + nop; nop; nop; \
20664 SRC(movl x(%esi), %ebx ) ; \
20665 addl %ebx, %eax ; \
20666 - DST(movl %ebx, x(%edi) ) ;
20667 + DST(movl %ebx, %es:x(%edi)) ;
20668
20669 #define ROUND(x) \
20670 + nop; nop; nop; \
20671 SRC(movl x(%esi), %ebx ) ; \
20672 adcl %ebx, %eax ; \
20673 - DST(movl %ebx, x(%edi) ) ;
20674 + DST(movl %ebx, %es:x(%edi)) ;
20675
20676 #define ARGBASE 12
20677 -
20678 -ENTRY(csum_partial_copy_generic)
20679 +
20680 +ENTRY(csum_partial_copy_generic_to_user)
20681 CFI_STARTPROC
20682 +
20683 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20684 + pushl_cfi %gs
20685 + popl_cfi %es
20686 + jmp csum_partial_copy_generic
20687 +#endif
20688 +
20689 +ENTRY(csum_partial_copy_generic_from_user)
20690 +
20691 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20692 + pushl_cfi %gs
20693 + popl_cfi %ds
20694 +#endif
20695 +
20696 +ENTRY(csum_partial_copy_generic)
20697 pushl_cfi %ebx
20698 CFI_REL_OFFSET ebx, 0
20699 pushl_cfi %edi
20700 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
20701 subl %ebx, %edi
20702 lea -1(%esi),%edx
20703 andl $-32,%edx
20704 - lea 3f(%ebx,%ebx), %ebx
20705 + lea 3f(%ebx,%ebx,2), %ebx
20706 testl %esi, %esi
20707 jmp *%ebx
20708 1: addl $64,%esi
20709 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
20710 jb 5f
20711 SRC( movw (%esi), %dx )
20712 leal 2(%esi), %esi
20713 -DST( movw %dx, (%edi) )
20714 +DST( movw %dx, %es:(%edi) )
20715 leal 2(%edi), %edi
20716 je 6f
20717 shll $16,%edx
20718 5:
20719 SRC( movb (%esi), %dl )
20720 -DST( movb %dl, (%edi) )
20721 +DST( movb %dl, %es:(%edi) )
20722 6: addl %edx, %eax
20723 adcl $0, %eax
20724 7:
20725 .section .fixup, "ax"
20726 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
20727 - movl $-EFAULT, (%ebx)
20728 + movl $-EFAULT, %ss:(%ebx)
20729 # zero the complete destination (computing the rest is too much work)
20730 movl ARGBASE+8(%esp),%edi # dst
20731 movl ARGBASE+12(%esp),%ecx # len
20732 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
20733 rep; stosb
20734 jmp 7b
20735 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20736 - movl $-EFAULT, (%ebx)
20737 + movl $-EFAULT, %ss:(%ebx)
20738 jmp 7b
20739 .previous
20740
20741 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20742 + pushl_cfi %ss
20743 + popl_cfi %ds
20744 + pushl_cfi %ss
20745 + popl_cfi %es
20746 +#endif
20747 +
20748 popl_cfi %esi
20749 CFI_RESTORE esi
20750 popl_cfi %edi
20751 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
20752 CFI_RESTORE ebx
20753 ret
20754 CFI_ENDPROC
20755 -ENDPROC(csum_partial_copy_generic)
20756 +ENDPROC(csum_partial_copy_generic_to_user)
20757
20758 #undef ROUND
20759 #undef ROUND1
20760 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
20761 index f2145cf..cea889d 100644
20762 --- a/arch/x86/lib/clear_page_64.S
20763 +++ b/arch/x86/lib/clear_page_64.S
20764 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
20765 movl $4096/8,%ecx
20766 xorl %eax,%eax
20767 rep stosq
20768 + pax_force_retaddr
20769 ret
20770 CFI_ENDPROC
20771 ENDPROC(clear_page_c)
20772 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
20773 movl $4096,%ecx
20774 xorl %eax,%eax
20775 rep stosb
20776 + pax_force_retaddr
20777 ret
20778 CFI_ENDPROC
20779 ENDPROC(clear_page_c_e)
20780 @@ -43,6 +45,7 @@ ENTRY(clear_page)
20781 leaq 64(%rdi),%rdi
20782 jnz .Lloop
20783 nop
20784 + pax_force_retaddr
20785 ret
20786 CFI_ENDPROC
20787 .Lclear_page_end:
20788 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
20789
20790 #include <asm/cpufeature.h>
20791
20792 - .section .altinstr_replacement,"ax"
20793 + .section .altinstr_replacement,"a"
20794 1: .byte 0xeb /* jmp <disp8> */
20795 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
20796 2: .byte 0xeb /* jmp <disp8> */
20797 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
20798 index 1e572c5..2a162cd 100644
20799 --- a/arch/x86/lib/cmpxchg16b_emu.S
20800 +++ b/arch/x86/lib/cmpxchg16b_emu.S
20801 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
20802
20803 popf
20804 mov $1, %al
20805 + pax_force_retaddr
20806 ret
20807
20808 not_same:
20809 popf
20810 xor %al,%al
20811 + pax_force_retaddr
20812 ret
20813
20814 CFI_ENDPROC
20815 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
20816 index 01c805b..dccb07f 100644
20817 --- a/arch/x86/lib/copy_page_64.S
20818 +++ b/arch/x86/lib/copy_page_64.S
20819 @@ -9,6 +9,7 @@ copy_page_c:
20820 CFI_STARTPROC
20821 movl $4096/8,%ecx
20822 rep movsq
20823 + pax_force_retaddr
20824 ret
20825 CFI_ENDPROC
20826 ENDPROC(copy_page_c)
20827 @@ -39,7 +40,7 @@ ENTRY(copy_page)
20828 movq 16 (%rsi), %rdx
20829 movq 24 (%rsi), %r8
20830 movq 32 (%rsi), %r9
20831 - movq 40 (%rsi), %r10
20832 + movq 40 (%rsi), %r13
20833 movq 48 (%rsi), %r11
20834 movq 56 (%rsi), %r12
20835
20836 @@ -50,7 +51,7 @@ ENTRY(copy_page)
20837 movq %rdx, 16 (%rdi)
20838 movq %r8, 24 (%rdi)
20839 movq %r9, 32 (%rdi)
20840 - movq %r10, 40 (%rdi)
20841 + movq %r13, 40 (%rdi)
20842 movq %r11, 48 (%rdi)
20843 movq %r12, 56 (%rdi)
20844
20845 @@ -69,7 +70,7 @@ ENTRY(copy_page)
20846 movq 16 (%rsi), %rdx
20847 movq 24 (%rsi), %r8
20848 movq 32 (%rsi), %r9
20849 - movq 40 (%rsi), %r10
20850 + movq 40 (%rsi), %r13
20851 movq 48 (%rsi), %r11
20852 movq 56 (%rsi), %r12
20853
20854 @@ -78,7 +79,7 @@ ENTRY(copy_page)
20855 movq %rdx, 16 (%rdi)
20856 movq %r8, 24 (%rdi)
20857 movq %r9, 32 (%rdi)
20858 - movq %r10, 40 (%rdi)
20859 + movq %r13, 40 (%rdi)
20860 movq %r11, 48 (%rdi)
20861 movq %r12, 56 (%rdi)
20862
20863 @@ -95,6 +96,7 @@ ENTRY(copy_page)
20864 CFI_RESTORE r13
20865 addq $3*8,%rsp
20866 CFI_ADJUST_CFA_OFFSET -3*8
20867 + pax_force_retaddr
20868 ret
20869 .Lcopy_page_end:
20870 CFI_ENDPROC
20871 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
20872
20873 #include <asm/cpufeature.h>
20874
20875 - .section .altinstr_replacement,"ax"
20876 + .section .altinstr_replacement,"a"
20877 1: .byte 0xeb /* jmp <disp8> */
20878 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
20879 2:
20880 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
20881 index 0248402..821c786 100644
20882 --- a/arch/x86/lib/copy_user_64.S
20883 +++ b/arch/x86/lib/copy_user_64.S
20884 @@ -16,6 +16,7 @@
20885 #include <asm/thread_info.h>
20886 #include <asm/cpufeature.h>
20887 #include <asm/alternative-asm.h>
20888 +#include <asm/pgtable.h>
20889
20890 /*
20891 * By placing feature2 after feature1 in altinstructions section, we logically
20892 @@ -29,7 +30,7 @@
20893 .byte 0xe9 /* 32bit jump */
20894 .long \orig-1f /* by default jump to orig */
20895 1:
20896 - .section .altinstr_replacement,"ax"
20897 + .section .altinstr_replacement,"a"
20898 2: .byte 0xe9 /* near jump with 32bit immediate */
20899 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
20900 3: .byte 0xe9 /* near jump with 32bit immediate */
20901 @@ -71,47 +72,20 @@
20902 #endif
20903 .endm
20904
20905 -/* Standard copy_to_user with segment limit checking */
20906 -ENTRY(_copy_to_user)
20907 - CFI_STARTPROC
20908 - GET_THREAD_INFO(%rax)
20909 - movq %rdi,%rcx
20910 - addq %rdx,%rcx
20911 - jc bad_to_user
20912 - cmpq TI_addr_limit(%rax),%rcx
20913 - ja bad_to_user
20914 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20915 - copy_user_generic_unrolled,copy_user_generic_string, \
20916 - copy_user_enhanced_fast_string
20917 - CFI_ENDPROC
20918 -ENDPROC(_copy_to_user)
20919 -
20920 -/* Standard copy_from_user with segment limit checking */
20921 -ENTRY(_copy_from_user)
20922 - CFI_STARTPROC
20923 - GET_THREAD_INFO(%rax)
20924 - movq %rsi,%rcx
20925 - addq %rdx,%rcx
20926 - jc bad_from_user
20927 - cmpq TI_addr_limit(%rax),%rcx
20928 - ja bad_from_user
20929 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20930 - copy_user_generic_unrolled,copy_user_generic_string, \
20931 - copy_user_enhanced_fast_string
20932 - CFI_ENDPROC
20933 -ENDPROC(_copy_from_user)
20934 -
20935 .section .fixup,"ax"
20936 /* must zero dest */
20937 ENTRY(bad_from_user)
20938 bad_from_user:
20939 CFI_STARTPROC
20940 + testl %edx,%edx
20941 + js bad_to_user
20942 movl %edx,%ecx
20943 xorl %eax,%eax
20944 rep
20945 stosb
20946 bad_to_user:
20947 movl %edx,%eax
20948 + pax_force_retaddr
20949 ret
20950 CFI_ENDPROC
20951 ENDPROC(bad_from_user)
20952 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
20953 jz 17f
20954 1: movq (%rsi),%r8
20955 2: movq 1*8(%rsi),%r9
20956 -3: movq 2*8(%rsi),%r10
20957 +3: movq 2*8(%rsi),%rax
20958 4: movq 3*8(%rsi),%r11
20959 5: movq %r8,(%rdi)
20960 6: movq %r9,1*8(%rdi)
20961 -7: movq %r10,2*8(%rdi)
20962 +7: movq %rax,2*8(%rdi)
20963 8: movq %r11,3*8(%rdi)
20964 9: movq 4*8(%rsi),%r8
20965 10: movq 5*8(%rsi),%r9
20966 -11: movq 6*8(%rsi),%r10
20967 +11: movq 6*8(%rsi),%rax
20968 12: movq 7*8(%rsi),%r11
20969 13: movq %r8,4*8(%rdi)
20970 14: movq %r9,5*8(%rdi)
20971 -15: movq %r10,6*8(%rdi)
20972 +15: movq %rax,6*8(%rdi)
20973 16: movq %r11,7*8(%rdi)
20974 leaq 64(%rsi),%rsi
20975 leaq 64(%rdi),%rdi
20976 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
20977 decl %ecx
20978 jnz 21b
20979 23: xor %eax,%eax
20980 + pax_force_retaddr
20981 ret
20982
20983 .section .fixup,"ax"
20984 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
20985 3: rep
20986 movsb
20987 4: xorl %eax,%eax
20988 + pax_force_retaddr
20989 ret
20990
20991 .section .fixup,"ax"
20992 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
20993 1: rep
20994 movsb
20995 2: xorl %eax,%eax
20996 + pax_force_retaddr
20997 ret
20998
20999 .section .fixup,"ax"
21000 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21001 index cb0c112..e3a6895 100644
21002 --- a/arch/x86/lib/copy_user_nocache_64.S
21003 +++ b/arch/x86/lib/copy_user_nocache_64.S
21004 @@ -8,12 +8,14 @@
21005
21006 #include <linux/linkage.h>
21007 #include <asm/dwarf2.h>
21008 +#include <asm/alternative-asm.h>
21009
21010 #define FIX_ALIGNMENT 1
21011
21012 #include <asm/current.h>
21013 #include <asm/asm-offsets.h>
21014 #include <asm/thread_info.h>
21015 +#include <asm/pgtable.h>
21016
21017 .macro ALIGN_DESTINATION
21018 #ifdef FIX_ALIGNMENT
21019 @@ -50,6 +52,15 @@
21020 */
21021 ENTRY(__copy_user_nocache)
21022 CFI_STARTPROC
21023 +
21024 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21025 + mov $PAX_USER_SHADOW_BASE,%rcx
21026 + cmp %rcx,%rsi
21027 + jae 1f
21028 + add %rcx,%rsi
21029 +1:
21030 +#endif
21031 +
21032 cmpl $8,%edx
21033 jb 20f /* less then 8 bytes, go to byte copy loop */
21034 ALIGN_DESTINATION
21035 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21036 jz 17f
21037 1: movq (%rsi),%r8
21038 2: movq 1*8(%rsi),%r9
21039 -3: movq 2*8(%rsi),%r10
21040 +3: movq 2*8(%rsi),%rax
21041 4: movq 3*8(%rsi),%r11
21042 5: movnti %r8,(%rdi)
21043 6: movnti %r9,1*8(%rdi)
21044 -7: movnti %r10,2*8(%rdi)
21045 +7: movnti %rax,2*8(%rdi)
21046 8: movnti %r11,3*8(%rdi)
21047 9: movq 4*8(%rsi),%r8
21048 10: movq 5*8(%rsi),%r9
21049 -11: movq 6*8(%rsi),%r10
21050 +11: movq 6*8(%rsi),%rax
21051 12: movq 7*8(%rsi),%r11
21052 13: movnti %r8,4*8(%rdi)
21053 14: movnti %r9,5*8(%rdi)
21054 -15: movnti %r10,6*8(%rdi)
21055 +15: movnti %rax,6*8(%rdi)
21056 16: movnti %r11,7*8(%rdi)
21057 leaq 64(%rsi),%rsi
21058 leaq 64(%rdi),%rdi
21059 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21060 jnz 21b
21061 23: xorl %eax,%eax
21062 sfence
21063 + pax_force_retaddr
21064 ret
21065
21066 .section .fixup,"ax"
21067 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21068 index fb903b7..c92b7f7 100644
21069 --- a/arch/x86/lib/csum-copy_64.S
21070 +++ b/arch/x86/lib/csum-copy_64.S
21071 @@ -8,6 +8,7 @@
21072 #include <linux/linkage.h>
21073 #include <asm/dwarf2.h>
21074 #include <asm/errno.h>
21075 +#include <asm/alternative-asm.h>
21076
21077 /*
21078 * Checksum copy with exception handling.
21079 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21080 CFI_RESTORE rbp
21081 addq $7*8, %rsp
21082 CFI_ADJUST_CFA_OFFSET -7*8
21083 + pax_force_retaddr 0, 1
21084 ret
21085 CFI_RESTORE_STATE
21086
21087 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21088 index 459b58a..9570bc7 100644
21089 --- a/arch/x86/lib/csum-wrappers_64.c
21090 +++ b/arch/x86/lib/csum-wrappers_64.c
21091 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21092 len -= 2;
21093 }
21094 }
21095 - isum = csum_partial_copy_generic((__force const void *)src,
21096 +
21097 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21098 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21099 + src += PAX_USER_SHADOW_BASE;
21100 +#endif
21101 +
21102 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21103 dst, len, isum, errp, NULL);
21104 if (unlikely(*errp))
21105 goto out_err;
21106 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21107 }
21108
21109 *errp = 0;
21110 - return csum_partial_copy_generic(src, (void __force *)dst,
21111 +
21112 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21113 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21114 + dst += PAX_USER_SHADOW_BASE;
21115 +#endif
21116 +
21117 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21118 len, isum, NULL, errp);
21119 }
21120 EXPORT_SYMBOL(csum_partial_copy_to_user);
21121 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21122 index 51f1504..ddac4c1 100644
21123 --- a/arch/x86/lib/getuser.S
21124 +++ b/arch/x86/lib/getuser.S
21125 @@ -33,15 +33,38 @@
21126 #include <asm/asm-offsets.h>
21127 #include <asm/thread_info.h>
21128 #include <asm/asm.h>
21129 +#include <asm/segment.h>
21130 +#include <asm/pgtable.h>
21131 +#include <asm/alternative-asm.h>
21132 +
21133 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21134 +#define __copyuser_seg gs;
21135 +#else
21136 +#define __copyuser_seg
21137 +#endif
21138
21139 .text
21140 ENTRY(__get_user_1)
21141 CFI_STARTPROC
21142 +
21143 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21144 GET_THREAD_INFO(%_ASM_DX)
21145 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21146 jae bad_get_user
21147 -1: movzb (%_ASM_AX),%edx
21148 +
21149 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21150 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21151 + cmp %_ASM_DX,%_ASM_AX
21152 + jae 1234f
21153 + add %_ASM_DX,%_ASM_AX
21154 +1234:
21155 +#endif
21156 +
21157 +#endif
21158 +
21159 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21160 xor %eax,%eax
21161 + pax_force_retaddr
21162 ret
21163 CFI_ENDPROC
21164 ENDPROC(__get_user_1)
21165 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21166 ENTRY(__get_user_2)
21167 CFI_STARTPROC
21168 add $1,%_ASM_AX
21169 +
21170 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21171 jc bad_get_user
21172 GET_THREAD_INFO(%_ASM_DX)
21173 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21174 jae bad_get_user
21175 -2: movzwl -1(%_ASM_AX),%edx
21176 +
21177 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21178 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21179 + cmp %_ASM_DX,%_ASM_AX
21180 + jae 1234f
21181 + add %_ASM_DX,%_ASM_AX
21182 +1234:
21183 +#endif
21184 +
21185 +#endif
21186 +
21187 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21188 xor %eax,%eax
21189 + pax_force_retaddr
21190 ret
21191 CFI_ENDPROC
21192 ENDPROC(__get_user_2)
21193 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21194 ENTRY(__get_user_4)
21195 CFI_STARTPROC
21196 add $3,%_ASM_AX
21197 +
21198 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21199 jc bad_get_user
21200 GET_THREAD_INFO(%_ASM_DX)
21201 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21202 jae bad_get_user
21203 -3: mov -3(%_ASM_AX),%edx
21204 +
21205 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21206 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21207 + cmp %_ASM_DX,%_ASM_AX
21208 + jae 1234f
21209 + add %_ASM_DX,%_ASM_AX
21210 +1234:
21211 +#endif
21212 +
21213 +#endif
21214 +
21215 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21216 xor %eax,%eax
21217 + pax_force_retaddr
21218 ret
21219 CFI_ENDPROC
21220 ENDPROC(__get_user_4)
21221 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21222 GET_THREAD_INFO(%_ASM_DX)
21223 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21224 jae bad_get_user
21225 +
21226 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21227 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21228 + cmp %_ASM_DX,%_ASM_AX
21229 + jae 1234f
21230 + add %_ASM_DX,%_ASM_AX
21231 +1234:
21232 +#endif
21233 +
21234 4: movq -7(%_ASM_AX),%_ASM_DX
21235 xor %eax,%eax
21236 + pax_force_retaddr
21237 ret
21238 CFI_ENDPROC
21239 ENDPROC(__get_user_8)
21240 @@ -91,6 +152,7 @@ bad_get_user:
21241 CFI_STARTPROC
21242 xor %edx,%edx
21243 mov $(-EFAULT),%_ASM_AX
21244 + pax_force_retaddr
21245 ret
21246 CFI_ENDPROC
21247 END(bad_get_user)
21248 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21249 index 374562e..a75830b 100644
21250 --- a/arch/x86/lib/insn.c
21251 +++ b/arch/x86/lib/insn.c
21252 @@ -21,6 +21,11 @@
21253 #include <linux/string.h>
21254 #include <asm/inat.h>
21255 #include <asm/insn.h>
21256 +#ifdef __KERNEL__
21257 +#include <asm/pgtable_types.h>
21258 +#else
21259 +#define ktla_ktva(addr) addr
21260 +#endif
21261
21262 /* Verify next sizeof(t) bytes can be on the same instruction */
21263 #define validate_next(t, insn, n) \
21264 @@ -49,8 +54,8 @@
21265 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21266 {
21267 memset(insn, 0, sizeof(*insn));
21268 - insn->kaddr = kaddr;
21269 - insn->next_byte = kaddr;
21270 + insn->kaddr = ktla_ktva(kaddr);
21271 + insn->next_byte = ktla_ktva(kaddr);
21272 insn->x86_64 = x86_64 ? 1 : 0;
21273 insn->opnd_bytes = 4;
21274 if (x86_64)
21275 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21276 index 05a95e7..326f2fa 100644
21277 --- a/arch/x86/lib/iomap_copy_64.S
21278 +++ b/arch/x86/lib/iomap_copy_64.S
21279 @@ -17,6 +17,7 @@
21280
21281 #include <linux/linkage.h>
21282 #include <asm/dwarf2.h>
21283 +#include <asm/alternative-asm.h>
21284
21285 /*
21286 * override generic version in lib/iomap_copy.c
21287 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21288 CFI_STARTPROC
21289 movl %edx,%ecx
21290 rep movsd
21291 + pax_force_retaddr
21292 ret
21293 CFI_ENDPROC
21294 ENDPROC(__iowrite32_copy)
21295 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21296 index efbf2a0..8893637 100644
21297 --- a/arch/x86/lib/memcpy_64.S
21298 +++ b/arch/x86/lib/memcpy_64.S
21299 @@ -34,6 +34,7 @@
21300 rep movsq
21301 movl %edx, %ecx
21302 rep movsb
21303 + pax_force_retaddr
21304 ret
21305 .Lmemcpy_e:
21306 .previous
21307 @@ -51,6 +52,7 @@
21308
21309 movl %edx, %ecx
21310 rep movsb
21311 + pax_force_retaddr
21312 ret
21313 .Lmemcpy_e_e:
21314 .previous
21315 @@ -81,13 +83,13 @@ ENTRY(memcpy)
21316 */
21317 movq 0*8(%rsi), %r8
21318 movq 1*8(%rsi), %r9
21319 - movq 2*8(%rsi), %r10
21320 + movq 2*8(%rsi), %rcx
21321 movq 3*8(%rsi), %r11
21322 leaq 4*8(%rsi), %rsi
21323
21324 movq %r8, 0*8(%rdi)
21325 movq %r9, 1*8(%rdi)
21326 - movq %r10, 2*8(%rdi)
21327 + movq %rcx, 2*8(%rdi)
21328 movq %r11, 3*8(%rdi)
21329 leaq 4*8(%rdi), %rdi
21330 jae .Lcopy_forward_loop
21331 @@ -110,12 +112,12 @@ ENTRY(memcpy)
21332 subq $0x20, %rdx
21333 movq -1*8(%rsi), %r8
21334 movq -2*8(%rsi), %r9
21335 - movq -3*8(%rsi), %r10
21336 + movq -3*8(%rsi), %rcx
21337 movq -4*8(%rsi), %r11
21338 leaq -4*8(%rsi), %rsi
21339 movq %r8, -1*8(%rdi)
21340 movq %r9, -2*8(%rdi)
21341 - movq %r10, -3*8(%rdi)
21342 + movq %rcx, -3*8(%rdi)
21343 movq %r11, -4*8(%rdi)
21344 leaq -4*8(%rdi), %rdi
21345 jae .Lcopy_backward_loop
21346 @@ -135,12 +137,13 @@ ENTRY(memcpy)
21347 */
21348 movq 0*8(%rsi), %r8
21349 movq 1*8(%rsi), %r9
21350 - movq -2*8(%rsi, %rdx), %r10
21351 + movq -2*8(%rsi, %rdx), %rcx
21352 movq -1*8(%rsi, %rdx), %r11
21353 movq %r8, 0*8(%rdi)
21354 movq %r9, 1*8(%rdi)
21355 - movq %r10, -2*8(%rdi, %rdx)
21356 + movq %rcx, -2*8(%rdi, %rdx)
21357 movq %r11, -1*8(%rdi, %rdx)
21358 + pax_force_retaddr
21359 retq
21360 .p2align 4
21361 .Lless_16bytes:
21362 @@ -153,6 +156,7 @@ ENTRY(memcpy)
21363 movq -1*8(%rsi, %rdx), %r9
21364 movq %r8, 0*8(%rdi)
21365 movq %r9, -1*8(%rdi, %rdx)
21366 + pax_force_retaddr
21367 retq
21368 .p2align 4
21369 .Lless_8bytes:
21370 @@ -166,6 +170,7 @@ ENTRY(memcpy)
21371 movl -4(%rsi, %rdx), %r8d
21372 movl %ecx, (%rdi)
21373 movl %r8d, -4(%rdi, %rdx)
21374 + pax_force_retaddr
21375 retq
21376 .p2align 4
21377 .Lless_3bytes:
21378 @@ -183,6 +188,7 @@ ENTRY(memcpy)
21379 jnz .Lloop_1
21380
21381 .Lend:
21382 + pax_force_retaddr
21383 retq
21384 CFI_ENDPROC
21385 ENDPROC(memcpy)
21386 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21387 index ee16461..c39c199 100644
21388 --- a/arch/x86/lib/memmove_64.S
21389 +++ b/arch/x86/lib/memmove_64.S
21390 @@ -61,13 +61,13 @@ ENTRY(memmove)
21391 5:
21392 sub $0x20, %rdx
21393 movq 0*8(%rsi), %r11
21394 - movq 1*8(%rsi), %r10
21395 + movq 1*8(%rsi), %rcx
21396 movq 2*8(%rsi), %r9
21397 movq 3*8(%rsi), %r8
21398 leaq 4*8(%rsi), %rsi
21399
21400 movq %r11, 0*8(%rdi)
21401 - movq %r10, 1*8(%rdi)
21402 + movq %rcx, 1*8(%rdi)
21403 movq %r9, 2*8(%rdi)
21404 movq %r8, 3*8(%rdi)
21405 leaq 4*8(%rdi), %rdi
21406 @@ -81,10 +81,10 @@ ENTRY(memmove)
21407 4:
21408 movq %rdx, %rcx
21409 movq -8(%rsi, %rdx), %r11
21410 - lea -8(%rdi, %rdx), %r10
21411 + lea -8(%rdi, %rdx), %r9
21412 shrq $3, %rcx
21413 rep movsq
21414 - movq %r11, (%r10)
21415 + movq %r11, (%r9)
21416 jmp 13f
21417 .Lmemmove_end_forward:
21418
21419 @@ -95,14 +95,14 @@ ENTRY(memmove)
21420 7:
21421 movq %rdx, %rcx
21422 movq (%rsi), %r11
21423 - movq %rdi, %r10
21424 + movq %rdi, %r9
21425 leaq -8(%rsi, %rdx), %rsi
21426 leaq -8(%rdi, %rdx), %rdi
21427 shrq $3, %rcx
21428 std
21429 rep movsq
21430 cld
21431 - movq %r11, (%r10)
21432 + movq %r11, (%r9)
21433 jmp 13f
21434
21435 /*
21436 @@ -127,13 +127,13 @@ ENTRY(memmove)
21437 8:
21438 subq $0x20, %rdx
21439 movq -1*8(%rsi), %r11
21440 - movq -2*8(%rsi), %r10
21441 + movq -2*8(%rsi), %rcx
21442 movq -3*8(%rsi), %r9
21443 movq -4*8(%rsi), %r8
21444 leaq -4*8(%rsi), %rsi
21445
21446 movq %r11, -1*8(%rdi)
21447 - movq %r10, -2*8(%rdi)
21448 + movq %rcx, -2*8(%rdi)
21449 movq %r9, -3*8(%rdi)
21450 movq %r8, -4*8(%rdi)
21451 leaq -4*8(%rdi), %rdi
21452 @@ -151,11 +151,11 @@ ENTRY(memmove)
21453 * Move data from 16 bytes to 31 bytes.
21454 */
21455 movq 0*8(%rsi), %r11
21456 - movq 1*8(%rsi), %r10
21457 + movq 1*8(%rsi), %rcx
21458 movq -2*8(%rsi, %rdx), %r9
21459 movq -1*8(%rsi, %rdx), %r8
21460 movq %r11, 0*8(%rdi)
21461 - movq %r10, 1*8(%rdi)
21462 + movq %rcx, 1*8(%rdi)
21463 movq %r9, -2*8(%rdi, %rdx)
21464 movq %r8, -1*8(%rdi, %rdx)
21465 jmp 13f
21466 @@ -167,9 +167,9 @@ ENTRY(memmove)
21467 * Move data from 8 bytes to 15 bytes.
21468 */
21469 movq 0*8(%rsi), %r11
21470 - movq -1*8(%rsi, %rdx), %r10
21471 + movq -1*8(%rsi, %rdx), %r9
21472 movq %r11, 0*8(%rdi)
21473 - movq %r10, -1*8(%rdi, %rdx)
21474 + movq %r9, -1*8(%rdi, %rdx)
21475 jmp 13f
21476 10:
21477 cmpq $4, %rdx
21478 @@ -178,9 +178,9 @@ ENTRY(memmove)
21479 * Move data from 4 bytes to 7 bytes.
21480 */
21481 movl (%rsi), %r11d
21482 - movl -4(%rsi, %rdx), %r10d
21483 + movl -4(%rsi, %rdx), %r9d
21484 movl %r11d, (%rdi)
21485 - movl %r10d, -4(%rdi, %rdx)
21486 + movl %r9d, -4(%rdi, %rdx)
21487 jmp 13f
21488 11:
21489 cmp $2, %rdx
21490 @@ -189,9 +189,9 @@ ENTRY(memmove)
21491 * Move data from 2 bytes to 3 bytes.
21492 */
21493 movw (%rsi), %r11w
21494 - movw -2(%rsi, %rdx), %r10w
21495 + movw -2(%rsi, %rdx), %r9w
21496 movw %r11w, (%rdi)
21497 - movw %r10w, -2(%rdi, %rdx)
21498 + movw %r9w, -2(%rdi, %rdx)
21499 jmp 13f
21500 12:
21501 cmp $1, %rdx
21502 @@ -202,6 +202,7 @@ ENTRY(memmove)
21503 movb (%rsi), %r11b
21504 movb %r11b, (%rdi)
21505 13:
21506 + pax_force_retaddr
21507 retq
21508 CFI_ENDPROC
21509
21510 @@ -210,6 +211,7 @@ ENTRY(memmove)
21511 /* Forward moving data. */
21512 movq %rdx, %rcx
21513 rep movsb
21514 + pax_force_retaddr
21515 retq
21516 .Lmemmove_end_forward_efs:
21517 .previous
21518 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
21519 index 79bd454..dff325a 100644
21520 --- a/arch/x86/lib/memset_64.S
21521 +++ b/arch/x86/lib/memset_64.S
21522 @@ -31,6 +31,7 @@
21523 movl %r8d,%ecx
21524 rep stosb
21525 movq %r9,%rax
21526 + pax_force_retaddr
21527 ret
21528 .Lmemset_e:
21529 .previous
21530 @@ -53,6 +54,7 @@
21531 movl %edx,%ecx
21532 rep stosb
21533 movq %r9,%rax
21534 + pax_force_retaddr
21535 ret
21536 .Lmemset_e_e:
21537 .previous
21538 @@ -60,13 +62,13 @@
21539 ENTRY(memset)
21540 ENTRY(__memset)
21541 CFI_STARTPROC
21542 - movq %rdi,%r10
21543 movq %rdx,%r11
21544
21545 /* expand byte value */
21546 movzbl %sil,%ecx
21547 movabs $0x0101010101010101,%rax
21548 mul %rcx /* with rax, clobbers rdx */
21549 + movq %rdi,%rdx
21550
21551 /* align dst */
21552 movl %edi,%r9d
21553 @@ -120,7 +122,8 @@ ENTRY(__memset)
21554 jnz .Lloop_1
21555
21556 .Lende:
21557 - movq %r10,%rax
21558 + movq %rdx,%rax
21559 + pax_force_retaddr
21560 ret
21561
21562 CFI_RESTORE_STATE
21563 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
21564 index c9f2d9b..e7fd2c0 100644
21565 --- a/arch/x86/lib/mmx_32.c
21566 +++ b/arch/x86/lib/mmx_32.c
21567 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21568 {
21569 void *p;
21570 int i;
21571 + unsigned long cr0;
21572
21573 if (unlikely(in_interrupt()))
21574 return __memcpy(to, from, len);
21575 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21576 kernel_fpu_begin();
21577
21578 __asm__ __volatile__ (
21579 - "1: prefetch (%0)\n" /* This set is 28 bytes */
21580 - " prefetch 64(%0)\n"
21581 - " prefetch 128(%0)\n"
21582 - " prefetch 192(%0)\n"
21583 - " prefetch 256(%0)\n"
21584 + "1: prefetch (%1)\n" /* This set is 28 bytes */
21585 + " prefetch 64(%1)\n"
21586 + " prefetch 128(%1)\n"
21587 + " prefetch 192(%1)\n"
21588 + " prefetch 256(%1)\n"
21589 "2: \n"
21590 ".section .fixup, \"ax\"\n"
21591 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21592 + "3: \n"
21593 +
21594 +#ifdef CONFIG_PAX_KERNEXEC
21595 + " movl %%cr0, %0\n"
21596 + " movl %0, %%eax\n"
21597 + " andl $0xFFFEFFFF, %%eax\n"
21598 + " movl %%eax, %%cr0\n"
21599 +#endif
21600 +
21601 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21602 +
21603 +#ifdef CONFIG_PAX_KERNEXEC
21604 + " movl %0, %%cr0\n"
21605 +#endif
21606 +
21607 " jmp 2b\n"
21608 ".previous\n"
21609 _ASM_EXTABLE(1b, 3b)
21610 - : : "r" (from));
21611 + : "=&r" (cr0) : "r" (from) : "ax");
21612
21613 for ( ; i > 5; i--) {
21614 __asm__ __volatile__ (
21615 - "1: prefetch 320(%0)\n"
21616 - "2: movq (%0), %%mm0\n"
21617 - " movq 8(%0), %%mm1\n"
21618 - " movq 16(%0), %%mm2\n"
21619 - " movq 24(%0), %%mm3\n"
21620 - " movq %%mm0, (%1)\n"
21621 - " movq %%mm1, 8(%1)\n"
21622 - " movq %%mm2, 16(%1)\n"
21623 - " movq %%mm3, 24(%1)\n"
21624 - " movq 32(%0), %%mm0\n"
21625 - " movq 40(%0), %%mm1\n"
21626 - " movq 48(%0), %%mm2\n"
21627 - " movq 56(%0), %%mm3\n"
21628 - " movq %%mm0, 32(%1)\n"
21629 - " movq %%mm1, 40(%1)\n"
21630 - " movq %%mm2, 48(%1)\n"
21631 - " movq %%mm3, 56(%1)\n"
21632 + "1: prefetch 320(%1)\n"
21633 + "2: movq (%1), %%mm0\n"
21634 + " movq 8(%1), %%mm1\n"
21635 + " movq 16(%1), %%mm2\n"
21636 + " movq 24(%1), %%mm3\n"
21637 + " movq %%mm0, (%2)\n"
21638 + " movq %%mm1, 8(%2)\n"
21639 + " movq %%mm2, 16(%2)\n"
21640 + " movq %%mm3, 24(%2)\n"
21641 + " movq 32(%1), %%mm0\n"
21642 + " movq 40(%1), %%mm1\n"
21643 + " movq 48(%1), %%mm2\n"
21644 + " movq 56(%1), %%mm3\n"
21645 + " movq %%mm0, 32(%2)\n"
21646 + " movq %%mm1, 40(%2)\n"
21647 + " movq %%mm2, 48(%2)\n"
21648 + " movq %%mm3, 56(%2)\n"
21649 ".section .fixup, \"ax\"\n"
21650 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21651 + "3:\n"
21652 +
21653 +#ifdef CONFIG_PAX_KERNEXEC
21654 + " movl %%cr0, %0\n"
21655 + " movl %0, %%eax\n"
21656 + " andl $0xFFFEFFFF, %%eax\n"
21657 + " movl %%eax, %%cr0\n"
21658 +#endif
21659 +
21660 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21661 +
21662 +#ifdef CONFIG_PAX_KERNEXEC
21663 + " movl %0, %%cr0\n"
21664 +#endif
21665 +
21666 " jmp 2b\n"
21667 ".previous\n"
21668 _ASM_EXTABLE(1b, 3b)
21669 - : : "r" (from), "r" (to) : "memory");
21670 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21671
21672 from += 64;
21673 to += 64;
21674 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
21675 static void fast_copy_page(void *to, void *from)
21676 {
21677 int i;
21678 + unsigned long cr0;
21679
21680 kernel_fpu_begin();
21681
21682 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
21683 * but that is for later. -AV
21684 */
21685 __asm__ __volatile__(
21686 - "1: prefetch (%0)\n"
21687 - " prefetch 64(%0)\n"
21688 - " prefetch 128(%0)\n"
21689 - " prefetch 192(%0)\n"
21690 - " prefetch 256(%0)\n"
21691 + "1: prefetch (%1)\n"
21692 + " prefetch 64(%1)\n"
21693 + " prefetch 128(%1)\n"
21694 + " prefetch 192(%1)\n"
21695 + " prefetch 256(%1)\n"
21696 "2: \n"
21697 ".section .fixup, \"ax\"\n"
21698 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21699 + "3: \n"
21700 +
21701 +#ifdef CONFIG_PAX_KERNEXEC
21702 + " movl %%cr0, %0\n"
21703 + " movl %0, %%eax\n"
21704 + " andl $0xFFFEFFFF, %%eax\n"
21705 + " movl %%eax, %%cr0\n"
21706 +#endif
21707 +
21708 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21709 +
21710 +#ifdef CONFIG_PAX_KERNEXEC
21711 + " movl %0, %%cr0\n"
21712 +#endif
21713 +
21714 " jmp 2b\n"
21715 ".previous\n"
21716 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
21717 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21718
21719 for (i = 0; i < (4096-320)/64; i++) {
21720 __asm__ __volatile__ (
21721 - "1: prefetch 320(%0)\n"
21722 - "2: movq (%0), %%mm0\n"
21723 - " movntq %%mm0, (%1)\n"
21724 - " movq 8(%0), %%mm1\n"
21725 - " movntq %%mm1, 8(%1)\n"
21726 - " movq 16(%0), %%mm2\n"
21727 - " movntq %%mm2, 16(%1)\n"
21728 - " movq 24(%0), %%mm3\n"
21729 - " movntq %%mm3, 24(%1)\n"
21730 - " movq 32(%0), %%mm4\n"
21731 - " movntq %%mm4, 32(%1)\n"
21732 - " movq 40(%0), %%mm5\n"
21733 - " movntq %%mm5, 40(%1)\n"
21734 - " movq 48(%0), %%mm6\n"
21735 - " movntq %%mm6, 48(%1)\n"
21736 - " movq 56(%0), %%mm7\n"
21737 - " movntq %%mm7, 56(%1)\n"
21738 + "1: prefetch 320(%1)\n"
21739 + "2: movq (%1), %%mm0\n"
21740 + " movntq %%mm0, (%2)\n"
21741 + " movq 8(%1), %%mm1\n"
21742 + " movntq %%mm1, 8(%2)\n"
21743 + " movq 16(%1), %%mm2\n"
21744 + " movntq %%mm2, 16(%2)\n"
21745 + " movq 24(%1), %%mm3\n"
21746 + " movntq %%mm3, 24(%2)\n"
21747 + " movq 32(%1), %%mm4\n"
21748 + " movntq %%mm4, 32(%2)\n"
21749 + " movq 40(%1), %%mm5\n"
21750 + " movntq %%mm5, 40(%2)\n"
21751 + " movq 48(%1), %%mm6\n"
21752 + " movntq %%mm6, 48(%2)\n"
21753 + " movq 56(%1), %%mm7\n"
21754 + " movntq %%mm7, 56(%2)\n"
21755 ".section .fixup, \"ax\"\n"
21756 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21757 + "3:\n"
21758 +
21759 +#ifdef CONFIG_PAX_KERNEXEC
21760 + " movl %%cr0, %0\n"
21761 + " movl %0, %%eax\n"
21762 + " andl $0xFFFEFFFF, %%eax\n"
21763 + " movl %%eax, %%cr0\n"
21764 +#endif
21765 +
21766 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21767 +
21768 +#ifdef CONFIG_PAX_KERNEXEC
21769 + " movl %0, %%cr0\n"
21770 +#endif
21771 +
21772 " jmp 2b\n"
21773 ".previous\n"
21774 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
21775 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21776
21777 from += 64;
21778 to += 64;
21779 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
21780 static void fast_copy_page(void *to, void *from)
21781 {
21782 int i;
21783 + unsigned long cr0;
21784
21785 kernel_fpu_begin();
21786
21787 __asm__ __volatile__ (
21788 - "1: prefetch (%0)\n"
21789 - " prefetch 64(%0)\n"
21790 - " prefetch 128(%0)\n"
21791 - " prefetch 192(%0)\n"
21792 - " prefetch 256(%0)\n"
21793 + "1: prefetch (%1)\n"
21794 + " prefetch 64(%1)\n"
21795 + " prefetch 128(%1)\n"
21796 + " prefetch 192(%1)\n"
21797 + " prefetch 256(%1)\n"
21798 "2: \n"
21799 ".section .fixup, \"ax\"\n"
21800 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21801 + "3: \n"
21802 +
21803 +#ifdef CONFIG_PAX_KERNEXEC
21804 + " movl %%cr0, %0\n"
21805 + " movl %0, %%eax\n"
21806 + " andl $0xFFFEFFFF, %%eax\n"
21807 + " movl %%eax, %%cr0\n"
21808 +#endif
21809 +
21810 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21811 +
21812 +#ifdef CONFIG_PAX_KERNEXEC
21813 + " movl %0, %%cr0\n"
21814 +#endif
21815 +
21816 " jmp 2b\n"
21817 ".previous\n"
21818 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
21819 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21820
21821 for (i = 0; i < 4096/64; i++) {
21822 __asm__ __volatile__ (
21823 - "1: prefetch 320(%0)\n"
21824 - "2: movq (%0), %%mm0\n"
21825 - " movq 8(%0), %%mm1\n"
21826 - " movq 16(%0), %%mm2\n"
21827 - " movq 24(%0), %%mm3\n"
21828 - " movq %%mm0, (%1)\n"
21829 - " movq %%mm1, 8(%1)\n"
21830 - " movq %%mm2, 16(%1)\n"
21831 - " movq %%mm3, 24(%1)\n"
21832 - " movq 32(%0), %%mm0\n"
21833 - " movq 40(%0), %%mm1\n"
21834 - " movq 48(%0), %%mm2\n"
21835 - " movq 56(%0), %%mm3\n"
21836 - " movq %%mm0, 32(%1)\n"
21837 - " movq %%mm1, 40(%1)\n"
21838 - " movq %%mm2, 48(%1)\n"
21839 - " movq %%mm3, 56(%1)\n"
21840 + "1: prefetch 320(%1)\n"
21841 + "2: movq (%1), %%mm0\n"
21842 + " movq 8(%1), %%mm1\n"
21843 + " movq 16(%1), %%mm2\n"
21844 + " movq 24(%1), %%mm3\n"
21845 + " movq %%mm0, (%2)\n"
21846 + " movq %%mm1, 8(%2)\n"
21847 + " movq %%mm2, 16(%2)\n"
21848 + " movq %%mm3, 24(%2)\n"
21849 + " movq 32(%1), %%mm0\n"
21850 + " movq 40(%1), %%mm1\n"
21851 + " movq 48(%1), %%mm2\n"
21852 + " movq 56(%1), %%mm3\n"
21853 + " movq %%mm0, 32(%2)\n"
21854 + " movq %%mm1, 40(%2)\n"
21855 + " movq %%mm2, 48(%2)\n"
21856 + " movq %%mm3, 56(%2)\n"
21857 ".section .fixup, \"ax\"\n"
21858 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21859 + "3:\n"
21860 +
21861 +#ifdef CONFIG_PAX_KERNEXEC
21862 + " movl %%cr0, %0\n"
21863 + " movl %0, %%eax\n"
21864 + " andl $0xFFFEFFFF, %%eax\n"
21865 + " movl %%eax, %%cr0\n"
21866 +#endif
21867 +
21868 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21869 +
21870 +#ifdef CONFIG_PAX_KERNEXEC
21871 + " movl %0, %%cr0\n"
21872 +#endif
21873 +
21874 " jmp 2b\n"
21875 ".previous\n"
21876 _ASM_EXTABLE(1b, 3b)
21877 - : : "r" (from), "r" (to) : "memory");
21878 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21879
21880 from += 64;
21881 to += 64;
21882 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
21883 index 69fa106..adda88b 100644
21884 --- a/arch/x86/lib/msr-reg.S
21885 +++ b/arch/x86/lib/msr-reg.S
21886 @@ -3,6 +3,7 @@
21887 #include <asm/dwarf2.h>
21888 #include <asm/asm.h>
21889 #include <asm/msr.h>
21890 +#include <asm/alternative-asm.h>
21891
21892 #ifdef CONFIG_X86_64
21893 /*
21894 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
21895 CFI_STARTPROC
21896 pushq_cfi %rbx
21897 pushq_cfi %rbp
21898 - movq %rdi, %r10 /* Save pointer */
21899 + movq %rdi, %r9 /* Save pointer */
21900 xorl %r11d, %r11d /* Return value */
21901 movl (%rdi), %eax
21902 movl 4(%rdi), %ecx
21903 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
21904 movl 28(%rdi), %edi
21905 CFI_REMEMBER_STATE
21906 1: \op
21907 -2: movl %eax, (%r10)
21908 +2: movl %eax, (%r9)
21909 movl %r11d, %eax /* Return value */
21910 - movl %ecx, 4(%r10)
21911 - movl %edx, 8(%r10)
21912 - movl %ebx, 12(%r10)
21913 - movl %ebp, 20(%r10)
21914 - movl %esi, 24(%r10)
21915 - movl %edi, 28(%r10)
21916 + movl %ecx, 4(%r9)
21917 + movl %edx, 8(%r9)
21918 + movl %ebx, 12(%r9)
21919 + movl %ebp, 20(%r9)
21920 + movl %esi, 24(%r9)
21921 + movl %edi, 28(%r9)
21922 popq_cfi %rbp
21923 popq_cfi %rbx
21924 + pax_force_retaddr
21925 ret
21926 3:
21927 CFI_RESTORE_STATE
21928 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
21929 index 36b0d15..d381858 100644
21930 --- a/arch/x86/lib/putuser.S
21931 +++ b/arch/x86/lib/putuser.S
21932 @@ -15,7 +15,9 @@
21933 #include <asm/thread_info.h>
21934 #include <asm/errno.h>
21935 #include <asm/asm.h>
21936 -
21937 +#include <asm/segment.h>
21938 +#include <asm/pgtable.h>
21939 +#include <asm/alternative-asm.h>
21940
21941 /*
21942 * __put_user_X
21943 @@ -29,52 +31,119 @@
21944 * as they get called from within inline assembly.
21945 */
21946
21947 -#define ENTER CFI_STARTPROC ; \
21948 - GET_THREAD_INFO(%_ASM_BX)
21949 -#define EXIT ret ; \
21950 +#define ENTER CFI_STARTPROC
21951 +#define EXIT pax_force_retaddr; ret ; \
21952 CFI_ENDPROC
21953
21954 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21955 +#define _DEST %_ASM_CX,%_ASM_BX
21956 +#else
21957 +#define _DEST %_ASM_CX
21958 +#endif
21959 +
21960 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21961 +#define __copyuser_seg gs;
21962 +#else
21963 +#define __copyuser_seg
21964 +#endif
21965 +
21966 .text
21967 ENTRY(__put_user_1)
21968 ENTER
21969 +
21970 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21971 + GET_THREAD_INFO(%_ASM_BX)
21972 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
21973 jae bad_put_user
21974 -1: movb %al,(%_ASM_CX)
21975 +
21976 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21977 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
21978 + cmp %_ASM_BX,%_ASM_CX
21979 + jb 1234f
21980 + xor %ebx,%ebx
21981 +1234:
21982 +#endif
21983 +
21984 +#endif
21985 +
21986 +1: __copyuser_seg movb %al,(_DEST)
21987 xor %eax,%eax
21988 EXIT
21989 ENDPROC(__put_user_1)
21990
21991 ENTRY(__put_user_2)
21992 ENTER
21993 +
21994 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21995 + GET_THREAD_INFO(%_ASM_BX)
21996 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
21997 sub $1,%_ASM_BX
21998 cmp %_ASM_BX,%_ASM_CX
21999 jae bad_put_user
22000 -2: movw %ax,(%_ASM_CX)
22001 +
22002 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22003 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22004 + cmp %_ASM_BX,%_ASM_CX
22005 + jb 1234f
22006 + xor %ebx,%ebx
22007 +1234:
22008 +#endif
22009 +
22010 +#endif
22011 +
22012 +2: __copyuser_seg movw %ax,(_DEST)
22013 xor %eax,%eax
22014 EXIT
22015 ENDPROC(__put_user_2)
22016
22017 ENTRY(__put_user_4)
22018 ENTER
22019 +
22020 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22021 + GET_THREAD_INFO(%_ASM_BX)
22022 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22023 sub $3,%_ASM_BX
22024 cmp %_ASM_BX,%_ASM_CX
22025 jae bad_put_user
22026 -3: movl %eax,(%_ASM_CX)
22027 +
22028 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22029 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22030 + cmp %_ASM_BX,%_ASM_CX
22031 + jb 1234f
22032 + xor %ebx,%ebx
22033 +1234:
22034 +#endif
22035 +
22036 +#endif
22037 +
22038 +3: __copyuser_seg movl %eax,(_DEST)
22039 xor %eax,%eax
22040 EXIT
22041 ENDPROC(__put_user_4)
22042
22043 ENTRY(__put_user_8)
22044 ENTER
22045 +
22046 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22047 + GET_THREAD_INFO(%_ASM_BX)
22048 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22049 sub $7,%_ASM_BX
22050 cmp %_ASM_BX,%_ASM_CX
22051 jae bad_put_user
22052 -4: mov %_ASM_AX,(%_ASM_CX)
22053 +
22054 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22055 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22056 + cmp %_ASM_BX,%_ASM_CX
22057 + jb 1234f
22058 + xor %ebx,%ebx
22059 +1234:
22060 +#endif
22061 +
22062 +#endif
22063 +
22064 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22065 #ifdef CONFIG_X86_32
22066 -5: movl %edx,4(%_ASM_CX)
22067 +5: __copyuser_seg movl %edx,4(_DEST)
22068 #endif
22069 xor %eax,%eax
22070 EXIT
22071 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22072 index 1cad221..de671ee 100644
22073 --- a/arch/x86/lib/rwlock.S
22074 +++ b/arch/x86/lib/rwlock.S
22075 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22076 FRAME
22077 0: LOCK_PREFIX
22078 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22079 +
22080 +#ifdef CONFIG_PAX_REFCOUNT
22081 + jno 1234f
22082 + LOCK_PREFIX
22083 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22084 + int $4
22085 +1234:
22086 + _ASM_EXTABLE(1234b, 1234b)
22087 +#endif
22088 +
22089 1: rep; nop
22090 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22091 jne 1b
22092 LOCK_PREFIX
22093 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22094 +
22095 +#ifdef CONFIG_PAX_REFCOUNT
22096 + jno 1234f
22097 + LOCK_PREFIX
22098 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22099 + int $4
22100 +1234:
22101 + _ASM_EXTABLE(1234b, 1234b)
22102 +#endif
22103 +
22104 jnz 0b
22105 ENDFRAME
22106 + pax_force_retaddr
22107 ret
22108 CFI_ENDPROC
22109 END(__write_lock_failed)
22110 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22111 FRAME
22112 0: LOCK_PREFIX
22113 READ_LOCK_SIZE(inc) (%__lock_ptr)
22114 +
22115 +#ifdef CONFIG_PAX_REFCOUNT
22116 + jno 1234f
22117 + LOCK_PREFIX
22118 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22119 + int $4
22120 +1234:
22121 + _ASM_EXTABLE(1234b, 1234b)
22122 +#endif
22123 +
22124 1: rep; nop
22125 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22126 js 1b
22127 LOCK_PREFIX
22128 READ_LOCK_SIZE(dec) (%__lock_ptr)
22129 +
22130 +#ifdef CONFIG_PAX_REFCOUNT
22131 + jno 1234f
22132 + LOCK_PREFIX
22133 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22134 + int $4
22135 +1234:
22136 + _ASM_EXTABLE(1234b, 1234b)
22137 +#endif
22138 +
22139 js 0b
22140 ENDFRAME
22141 + pax_force_retaddr
22142 ret
22143 CFI_ENDPROC
22144 END(__read_lock_failed)
22145 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22146 index 5dff5f0..cadebf4 100644
22147 --- a/arch/x86/lib/rwsem.S
22148 +++ b/arch/x86/lib/rwsem.S
22149 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22150 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22151 CFI_RESTORE __ASM_REG(dx)
22152 restore_common_regs
22153 + pax_force_retaddr
22154 ret
22155 CFI_ENDPROC
22156 ENDPROC(call_rwsem_down_read_failed)
22157 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22158 movq %rax,%rdi
22159 call rwsem_down_write_failed
22160 restore_common_regs
22161 + pax_force_retaddr
22162 ret
22163 CFI_ENDPROC
22164 ENDPROC(call_rwsem_down_write_failed)
22165 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22166 movq %rax,%rdi
22167 call rwsem_wake
22168 restore_common_regs
22169 -1: ret
22170 +1: pax_force_retaddr
22171 + ret
22172 CFI_ENDPROC
22173 ENDPROC(call_rwsem_wake)
22174
22175 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22176 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22177 CFI_RESTORE __ASM_REG(dx)
22178 restore_common_regs
22179 + pax_force_retaddr
22180 ret
22181 CFI_ENDPROC
22182 ENDPROC(call_rwsem_downgrade_wake)
22183 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22184 index a63efd6..ccecad8 100644
22185 --- a/arch/x86/lib/thunk_64.S
22186 +++ b/arch/x86/lib/thunk_64.S
22187 @@ -8,6 +8,7 @@
22188 #include <linux/linkage.h>
22189 #include <asm/dwarf2.h>
22190 #include <asm/calling.h>
22191 +#include <asm/alternative-asm.h>
22192
22193 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22194 .macro THUNK name, func, put_ret_addr_in_rdi=0
22195 @@ -41,5 +42,6 @@
22196 SAVE_ARGS
22197 restore:
22198 RESTORE_ARGS
22199 + pax_force_retaddr
22200 ret
22201 CFI_ENDPROC
22202 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22203 index e218d5d..35679b4 100644
22204 --- a/arch/x86/lib/usercopy_32.c
22205 +++ b/arch/x86/lib/usercopy_32.c
22206 @@ -43,7 +43,7 @@ do { \
22207 __asm__ __volatile__( \
22208 " testl %1,%1\n" \
22209 " jz 2f\n" \
22210 - "0: lodsb\n" \
22211 + "0: "__copyuser_seg"lodsb\n" \
22212 " stosb\n" \
22213 " testb %%al,%%al\n" \
22214 " jz 1f\n" \
22215 @@ -128,10 +128,12 @@ do { \
22216 int __d0; \
22217 might_fault(); \
22218 __asm__ __volatile__( \
22219 + __COPYUSER_SET_ES \
22220 "0: rep; stosl\n" \
22221 " movl %2,%0\n" \
22222 "1: rep; stosb\n" \
22223 "2:\n" \
22224 + __COPYUSER_RESTORE_ES \
22225 ".section .fixup,\"ax\"\n" \
22226 "3: lea 0(%2,%0,4),%0\n" \
22227 " jmp 2b\n" \
22228 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22229 might_fault();
22230
22231 __asm__ __volatile__(
22232 + __COPYUSER_SET_ES
22233 " testl %0, %0\n"
22234 " jz 3f\n"
22235 " andl %0,%%ecx\n"
22236 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22237 " subl %%ecx,%0\n"
22238 " addl %0,%%eax\n"
22239 "1:\n"
22240 + __COPYUSER_RESTORE_ES
22241 ".section .fixup,\"ax\"\n"
22242 "2: xorl %%eax,%%eax\n"
22243 " jmp 1b\n"
22244 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22245
22246 #ifdef CONFIG_X86_INTEL_USERCOPY
22247 static unsigned long
22248 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22249 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22250 {
22251 int d0, d1;
22252 __asm__ __volatile__(
22253 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22254 " .align 2,0x90\n"
22255 "3: movl 0(%4), %%eax\n"
22256 "4: movl 4(%4), %%edx\n"
22257 - "5: movl %%eax, 0(%3)\n"
22258 - "6: movl %%edx, 4(%3)\n"
22259 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22260 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22261 "7: movl 8(%4), %%eax\n"
22262 "8: movl 12(%4),%%edx\n"
22263 - "9: movl %%eax, 8(%3)\n"
22264 - "10: movl %%edx, 12(%3)\n"
22265 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22266 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22267 "11: movl 16(%4), %%eax\n"
22268 "12: movl 20(%4), %%edx\n"
22269 - "13: movl %%eax, 16(%3)\n"
22270 - "14: movl %%edx, 20(%3)\n"
22271 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22272 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22273 "15: movl 24(%4), %%eax\n"
22274 "16: movl 28(%4), %%edx\n"
22275 - "17: movl %%eax, 24(%3)\n"
22276 - "18: movl %%edx, 28(%3)\n"
22277 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22278 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22279 "19: movl 32(%4), %%eax\n"
22280 "20: movl 36(%4), %%edx\n"
22281 - "21: movl %%eax, 32(%3)\n"
22282 - "22: movl %%edx, 36(%3)\n"
22283 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22284 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22285 "23: movl 40(%4), %%eax\n"
22286 "24: movl 44(%4), %%edx\n"
22287 - "25: movl %%eax, 40(%3)\n"
22288 - "26: movl %%edx, 44(%3)\n"
22289 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22290 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22291 "27: movl 48(%4), %%eax\n"
22292 "28: movl 52(%4), %%edx\n"
22293 - "29: movl %%eax, 48(%3)\n"
22294 - "30: movl %%edx, 52(%3)\n"
22295 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22296 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22297 "31: movl 56(%4), %%eax\n"
22298 "32: movl 60(%4), %%edx\n"
22299 - "33: movl %%eax, 56(%3)\n"
22300 - "34: movl %%edx, 60(%3)\n"
22301 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22302 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22303 " addl $-64, %0\n"
22304 " addl $64, %4\n"
22305 " addl $64, %3\n"
22306 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22307 " shrl $2, %0\n"
22308 " andl $3, %%eax\n"
22309 " cld\n"
22310 + __COPYUSER_SET_ES
22311 "99: rep; movsl\n"
22312 "36: movl %%eax, %0\n"
22313 "37: rep; movsb\n"
22314 "100:\n"
22315 + __COPYUSER_RESTORE_ES
22316 + ".section .fixup,\"ax\"\n"
22317 + "101: lea 0(%%eax,%0,4),%0\n"
22318 + " jmp 100b\n"
22319 + ".previous\n"
22320 + ".section __ex_table,\"a\"\n"
22321 + " .align 4\n"
22322 + " .long 1b,100b\n"
22323 + " .long 2b,100b\n"
22324 + " .long 3b,100b\n"
22325 + " .long 4b,100b\n"
22326 + " .long 5b,100b\n"
22327 + " .long 6b,100b\n"
22328 + " .long 7b,100b\n"
22329 + " .long 8b,100b\n"
22330 + " .long 9b,100b\n"
22331 + " .long 10b,100b\n"
22332 + " .long 11b,100b\n"
22333 + " .long 12b,100b\n"
22334 + " .long 13b,100b\n"
22335 + " .long 14b,100b\n"
22336 + " .long 15b,100b\n"
22337 + " .long 16b,100b\n"
22338 + " .long 17b,100b\n"
22339 + " .long 18b,100b\n"
22340 + " .long 19b,100b\n"
22341 + " .long 20b,100b\n"
22342 + " .long 21b,100b\n"
22343 + " .long 22b,100b\n"
22344 + " .long 23b,100b\n"
22345 + " .long 24b,100b\n"
22346 + " .long 25b,100b\n"
22347 + " .long 26b,100b\n"
22348 + " .long 27b,100b\n"
22349 + " .long 28b,100b\n"
22350 + " .long 29b,100b\n"
22351 + " .long 30b,100b\n"
22352 + " .long 31b,100b\n"
22353 + " .long 32b,100b\n"
22354 + " .long 33b,100b\n"
22355 + " .long 34b,100b\n"
22356 + " .long 35b,100b\n"
22357 + " .long 36b,100b\n"
22358 + " .long 37b,100b\n"
22359 + " .long 99b,101b\n"
22360 + ".previous"
22361 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
22362 + : "1"(to), "2"(from), "0"(size)
22363 + : "eax", "edx", "memory");
22364 + return size;
22365 +}
22366 +
22367 +static unsigned long
22368 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22369 +{
22370 + int d0, d1;
22371 + __asm__ __volatile__(
22372 + " .align 2,0x90\n"
22373 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22374 + " cmpl $67, %0\n"
22375 + " jbe 3f\n"
22376 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22377 + " .align 2,0x90\n"
22378 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22379 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22380 + "5: movl %%eax, 0(%3)\n"
22381 + "6: movl %%edx, 4(%3)\n"
22382 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22383 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22384 + "9: movl %%eax, 8(%3)\n"
22385 + "10: movl %%edx, 12(%3)\n"
22386 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22387 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22388 + "13: movl %%eax, 16(%3)\n"
22389 + "14: movl %%edx, 20(%3)\n"
22390 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22391 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22392 + "17: movl %%eax, 24(%3)\n"
22393 + "18: movl %%edx, 28(%3)\n"
22394 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22395 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22396 + "21: movl %%eax, 32(%3)\n"
22397 + "22: movl %%edx, 36(%3)\n"
22398 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22399 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22400 + "25: movl %%eax, 40(%3)\n"
22401 + "26: movl %%edx, 44(%3)\n"
22402 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22403 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22404 + "29: movl %%eax, 48(%3)\n"
22405 + "30: movl %%edx, 52(%3)\n"
22406 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22407 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22408 + "33: movl %%eax, 56(%3)\n"
22409 + "34: movl %%edx, 60(%3)\n"
22410 + " addl $-64, %0\n"
22411 + " addl $64, %4\n"
22412 + " addl $64, %3\n"
22413 + " cmpl $63, %0\n"
22414 + " ja 1b\n"
22415 + "35: movl %0, %%eax\n"
22416 + " shrl $2, %0\n"
22417 + " andl $3, %%eax\n"
22418 + " cld\n"
22419 + "99: rep; "__copyuser_seg" movsl\n"
22420 + "36: movl %%eax, %0\n"
22421 + "37: rep; "__copyuser_seg" movsb\n"
22422 + "100:\n"
22423 ".section .fixup,\"ax\"\n"
22424 "101: lea 0(%%eax,%0,4),%0\n"
22425 " jmp 100b\n"
22426 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22427 int d0, d1;
22428 __asm__ __volatile__(
22429 " .align 2,0x90\n"
22430 - "0: movl 32(%4), %%eax\n"
22431 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22432 " cmpl $67, %0\n"
22433 " jbe 2f\n"
22434 - "1: movl 64(%4), %%eax\n"
22435 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22436 " .align 2,0x90\n"
22437 - "2: movl 0(%4), %%eax\n"
22438 - "21: movl 4(%4), %%edx\n"
22439 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22440 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22441 " movl %%eax, 0(%3)\n"
22442 " movl %%edx, 4(%3)\n"
22443 - "3: movl 8(%4), %%eax\n"
22444 - "31: movl 12(%4),%%edx\n"
22445 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22446 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22447 " movl %%eax, 8(%3)\n"
22448 " movl %%edx, 12(%3)\n"
22449 - "4: movl 16(%4), %%eax\n"
22450 - "41: movl 20(%4), %%edx\n"
22451 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22452 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22453 " movl %%eax, 16(%3)\n"
22454 " movl %%edx, 20(%3)\n"
22455 - "10: movl 24(%4), %%eax\n"
22456 - "51: movl 28(%4), %%edx\n"
22457 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22458 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22459 " movl %%eax, 24(%3)\n"
22460 " movl %%edx, 28(%3)\n"
22461 - "11: movl 32(%4), %%eax\n"
22462 - "61: movl 36(%4), %%edx\n"
22463 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22464 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22465 " movl %%eax, 32(%3)\n"
22466 " movl %%edx, 36(%3)\n"
22467 - "12: movl 40(%4), %%eax\n"
22468 - "71: movl 44(%4), %%edx\n"
22469 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22470 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22471 " movl %%eax, 40(%3)\n"
22472 " movl %%edx, 44(%3)\n"
22473 - "13: movl 48(%4), %%eax\n"
22474 - "81: movl 52(%4), %%edx\n"
22475 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22476 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22477 " movl %%eax, 48(%3)\n"
22478 " movl %%edx, 52(%3)\n"
22479 - "14: movl 56(%4), %%eax\n"
22480 - "91: movl 60(%4), %%edx\n"
22481 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22482 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22483 " movl %%eax, 56(%3)\n"
22484 " movl %%edx, 60(%3)\n"
22485 " addl $-64, %0\n"
22486 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22487 " shrl $2, %0\n"
22488 " andl $3, %%eax\n"
22489 " cld\n"
22490 - "6: rep; movsl\n"
22491 + "6: rep; "__copyuser_seg" movsl\n"
22492 " movl %%eax,%0\n"
22493 - "7: rep; movsb\n"
22494 + "7: rep; "__copyuser_seg" movsb\n"
22495 "8:\n"
22496 ".section .fixup,\"ax\"\n"
22497 "9: lea 0(%%eax,%0,4),%0\n"
22498 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22499
22500 __asm__ __volatile__(
22501 " .align 2,0x90\n"
22502 - "0: movl 32(%4), %%eax\n"
22503 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22504 " cmpl $67, %0\n"
22505 " jbe 2f\n"
22506 - "1: movl 64(%4), %%eax\n"
22507 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22508 " .align 2,0x90\n"
22509 - "2: movl 0(%4), %%eax\n"
22510 - "21: movl 4(%4), %%edx\n"
22511 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22512 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22513 " movnti %%eax, 0(%3)\n"
22514 " movnti %%edx, 4(%3)\n"
22515 - "3: movl 8(%4), %%eax\n"
22516 - "31: movl 12(%4),%%edx\n"
22517 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22518 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22519 " movnti %%eax, 8(%3)\n"
22520 " movnti %%edx, 12(%3)\n"
22521 - "4: movl 16(%4), %%eax\n"
22522 - "41: movl 20(%4), %%edx\n"
22523 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22524 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22525 " movnti %%eax, 16(%3)\n"
22526 " movnti %%edx, 20(%3)\n"
22527 - "10: movl 24(%4), %%eax\n"
22528 - "51: movl 28(%4), %%edx\n"
22529 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22530 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22531 " movnti %%eax, 24(%3)\n"
22532 " movnti %%edx, 28(%3)\n"
22533 - "11: movl 32(%4), %%eax\n"
22534 - "61: movl 36(%4), %%edx\n"
22535 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22536 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22537 " movnti %%eax, 32(%3)\n"
22538 " movnti %%edx, 36(%3)\n"
22539 - "12: movl 40(%4), %%eax\n"
22540 - "71: movl 44(%4), %%edx\n"
22541 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22542 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22543 " movnti %%eax, 40(%3)\n"
22544 " movnti %%edx, 44(%3)\n"
22545 - "13: movl 48(%4), %%eax\n"
22546 - "81: movl 52(%4), %%edx\n"
22547 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22548 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22549 " movnti %%eax, 48(%3)\n"
22550 " movnti %%edx, 52(%3)\n"
22551 - "14: movl 56(%4), %%eax\n"
22552 - "91: movl 60(%4), %%edx\n"
22553 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22554 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22555 " movnti %%eax, 56(%3)\n"
22556 " movnti %%edx, 60(%3)\n"
22557 " addl $-64, %0\n"
22558 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22559 " shrl $2, %0\n"
22560 " andl $3, %%eax\n"
22561 " cld\n"
22562 - "6: rep; movsl\n"
22563 + "6: rep; "__copyuser_seg" movsl\n"
22564 " movl %%eax,%0\n"
22565 - "7: rep; movsb\n"
22566 + "7: rep; "__copyuser_seg" movsb\n"
22567 "8:\n"
22568 ".section .fixup,\"ax\"\n"
22569 "9: lea 0(%%eax,%0,4),%0\n"
22570 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
22571
22572 __asm__ __volatile__(
22573 " .align 2,0x90\n"
22574 - "0: movl 32(%4), %%eax\n"
22575 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22576 " cmpl $67, %0\n"
22577 " jbe 2f\n"
22578 - "1: movl 64(%4), %%eax\n"
22579 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22580 " .align 2,0x90\n"
22581 - "2: movl 0(%4), %%eax\n"
22582 - "21: movl 4(%4), %%edx\n"
22583 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22584 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22585 " movnti %%eax, 0(%3)\n"
22586 " movnti %%edx, 4(%3)\n"
22587 - "3: movl 8(%4), %%eax\n"
22588 - "31: movl 12(%4),%%edx\n"
22589 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22590 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22591 " movnti %%eax, 8(%3)\n"
22592 " movnti %%edx, 12(%3)\n"
22593 - "4: movl 16(%4), %%eax\n"
22594 - "41: movl 20(%4), %%edx\n"
22595 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22596 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22597 " movnti %%eax, 16(%3)\n"
22598 " movnti %%edx, 20(%3)\n"
22599 - "10: movl 24(%4), %%eax\n"
22600 - "51: movl 28(%4), %%edx\n"
22601 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22602 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22603 " movnti %%eax, 24(%3)\n"
22604 " movnti %%edx, 28(%3)\n"
22605 - "11: movl 32(%4), %%eax\n"
22606 - "61: movl 36(%4), %%edx\n"
22607 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22608 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22609 " movnti %%eax, 32(%3)\n"
22610 " movnti %%edx, 36(%3)\n"
22611 - "12: movl 40(%4), %%eax\n"
22612 - "71: movl 44(%4), %%edx\n"
22613 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22614 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22615 " movnti %%eax, 40(%3)\n"
22616 " movnti %%edx, 44(%3)\n"
22617 - "13: movl 48(%4), %%eax\n"
22618 - "81: movl 52(%4), %%edx\n"
22619 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22620 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22621 " movnti %%eax, 48(%3)\n"
22622 " movnti %%edx, 52(%3)\n"
22623 - "14: movl 56(%4), %%eax\n"
22624 - "91: movl 60(%4), %%edx\n"
22625 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22626 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22627 " movnti %%eax, 56(%3)\n"
22628 " movnti %%edx, 60(%3)\n"
22629 " addl $-64, %0\n"
22630 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
22631 " shrl $2, %0\n"
22632 " andl $3, %%eax\n"
22633 " cld\n"
22634 - "6: rep; movsl\n"
22635 + "6: rep; "__copyuser_seg" movsl\n"
22636 " movl %%eax,%0\n"
22637 - "7: rep; movsb\n"
22638 + "7: rep; "__copyuser_seg" movsb\n"
22639 "8:\n"
22640 ".section .fixup,\"ax\"\n"
22641 "9: lea 0(%%eax,%0,4),%0\n"
22642 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
22643 */
22644 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
22645 unsigned long size);
22646 -unsigned long __copy_user_intel(void __user *to, const void *from,
22647 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
22648 + unsigned long size);
22649 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
22650 unsigned long size);
22651 unsigned long __copy_user_zeroing_intel_nocache(void *to,
22652 const void __user *from, unsigned long size);
22653 #endif /* CONFIG_X86_INTEL_USERCOPY */
22654
22655 /* Generic arbitrary sized copy. */
22656 -#define __copy_user(to, from, size) \
22657 +#define __copy_user(to, from, size, prefix, set, restore) \
22658 do { \
22659 int __d0, __d1, __d2; \
22660 __asm__ __volatile__( \
22661 + set \
22662 " cmp $7,%0\n" \
22663 " jbe 1f\n" \
22664 " movl %1,%0\n" \
22665 " negl %0\n" \
22666 " andl $7,%0\n" \
22667 " subl %0,%3\n" \
22668 - "4: rep; movsb\n" \
22669 + "4: rep; "prefix"movsb\n" \
22670 " movl %3,%0\n" \
22671 " shrl $2,%0\n" \
22672 " andl $3,%3\n" \
22673 " .align 2,0x90\n" \
22674 - "0: rep; movsl\n" \
22675 + "0: rep; "prefix"movsl\n" \
22676 " movl %3,%0\n" \
22677 - "1: rep; movsb\n" \
22678 + "1: rep; "prefix"movsb\n" \
22679 "2:\n" \
22680 + restore \
22681 ".section .fixup,\"ax\"\n" \
22682 "5: addl %3,%0\n" \
22683 " jmp 2b\n" \
22684 @@ -682,14 +799,14 @@ do { \
22685 " negl %0\n" \
22686 " andl $7,%0\n" \
22687 " subl %0,%3\n" \
22688 - "4: rep; movsb\n" \
22689 + "4: rep; "__copyuser_seg"movsb\n" \
22690 " movl %3,%0\n" \
22691 " shrl $2,%0\n" \
22692 " andl $3,%3\n" \
22693 " .align 2,0x90\n" \
22694 - "0: rep; movsl\n" \
22695 + "0: rep; "__copyuser_seg"movsl\n" \
22696 " movl %3,%0\n" \
22697 - "1: rep; movsb\n" \
22698 + "1: rep; "__copyuser_seg"movsb\n" \
22699 "2:\n" \
22700 ".section .fixup,\"ax\"\n" \
22701 "5: addl %3,%0\n" \
22702 @@ -775,9 +892,9 @@ survive:
22703 }
22704 #endif
22705 if (movsl_is_ok(to, from, n))
22706 - __copy_user(to, from, n);
22707 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
22708 else
22709 - n = __copy_user_intel(to, from, n);
22710 + n = __generic_copy_to_user_intel(to, from, n);
22711 return n;
22712 }
22713 EXPORT_SYMBOL(__copy_to_user_ll);
22714 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
22715 unsigned long n)
22716 {
22717 if (movsl_is_ok(to, from, n))
22718 - __copy_user(to, from, n);
22719 + __copy_user(to, from, n, __copyuser_seg, "", "");
22720 else
22721 - n = __copy_user_intel((void __user *)to,
22722 - (const void *)from, n);
22723 + n = __generic_copy_from_user_intel(to, from, n);
22724 return n;
22725 }
22726 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
22727 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
22728 if (n > 64 && cpu_has_xmm2)
22729 n = __copy_user_intel_nocache(to, from, n);
22730 else
22731 - __copy_user(to, from, n);
22732 + __copy_user(to, from, n, __copyuser_seg, "", "");
22733 #else
22734 - __copy_user(to, from, n);
22735 + __copy_user(to, from, n, __copyuser_seg, "", "");
22736 #endif
22737 return n;
22738 }
22739 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
22740
22741 -/**
22742 - * copy_to_user: - Copy a block of data into user space.
22743 - * @to: Destination address, in user space.
22744 - * @from: Source address, in kernel space.
22745 - * @n: Number of bytes to copy.
22746 - *
22747 - * Context: User context only. This function may sleep.
22748 - *
22749 - * Copy data from kernel space to user space.
22750 - *
22751 - * Returns number of bytes that could not be copied.
22752 - * On success, this will be zero.
22753 - */
22754 -unsigned long
22755 -copy_to_user(void __user *to, const void *from, unsigned long n)
22756 -{
22757 - if (access_ok(VERIFY_WRITE, to, n))
22758 - n = __copy_to_user(to, from, n);
22759 - return n;
22760 -}
22761 -EXPORT_SYMBOL(copy_to_user);
22762 -
22763 -/**
22764 - * copy_from_user: - Copy a block of data from user space.
22765 - * @to: Destination address, in kernel space.
22766 - * @from: Source address, in user space.
22767 - * @n: Number of bytes to copy.
22768 - *
22769 - * Context: User context only. This function may sleep.
22770 - *
22771 - * Copy data from user space to kernel space.
22772 - *
22773 - * Returns number of bytes that could not be copied.
22774 - * On success, this will be zero.
22775 - *
22776 - * If some data could not be copied, this function will pad the copied
22777 - * data to the requested size using zero bytes.
22778 - */
22779 -unsigned long
22780 -_copy_from_user(void *to, const void __user *from, unsigned long n)
22781 -{
22782 - if (access_ok(VERIFY_READ, from, n))
22783 - n = __copy_from_user(to, from, n);
22784 - else
22785 - memset(to, 0, n);
22786 - return n;
22787 -}
22788 -EXPORT_SYMBOL(_copy_from_user);
22789 -
22790 void copy_from_user_overflow(void)
22791 {
22792 WARN(1, "Buffer overflow detected!\n");
22793 }
22794 EXPORT_SYMBOL(copy_from_user_overflow);
22795 +
22796 +void copy_to_user_overflow(void)
22797 +{
22798 + WARN(1, "Buffer overflow detected!\n");
22799 +}
22800 +EXPORT_SYMBOL(copy_to_user_overflow);
22801 +
22802 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22803 +void __set_fs(mm_segment_t x)
22804 +{
22805 + switch (x.seg) {
22806 + case 0:
22807 + loadsegment(gs, 0);
22808 + break;
22809 + case TASK_SIZE_MAX:
22810 + loadsegment(gs, __USER_DS);
22811 + break;
22812 + case -1UL:
22813 + loadsegment(gs, __KERNEL_DS);
22814 + break;
22815 + default:
22816 + BUG();
22817 + }
22818 + return;
22819 +}
22820 +EXPORT_SYMBOL(__set_fs);
22821 +
22822 +void set_fs(mm_segment_t x)
22823 +{
22824 + current_thread_info()->addr_limit = x;
22825 + __set_fs(x);
22826 +}
22827 +EXPORT_SYMBOL(set_fs);
22828 +#endif
22829 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
22830 index b7c2849..8633ad8 100644
22831 --- a/arch/x86/lib/usercopy_64.c
22832 +++ b/arch/x86/lib/usercopy_64.c
22833 @@ -42,6 +42,12 @@ long
22834 __strncpy_from_user(char *dst, const char __user *src, long count)
22835 {
22836 long res;
22837 +
22838 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22839 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22840 + src += PAX_USER_SHADOW_BASE;
22841 +#endif
22842 +
22843 __do_strncpy_from_user(dst, src, count, res);
22844 return res;
22845 }
22846 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
22847 {
22848 long __d0;
22849 might_fault();
22850 +
22851 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22852 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
22853 + addr += PAX_USER_SHADOW_BASE;
22854 +#endif
22855 +
22856 /* no memory constraint because it doesn't change any memory gcc knows
22857 about */
22858 asm volatile(
22859 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
22860 }
22861 EXPORT_SYMBOL(strlen_user);
22862
22863 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
22864 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
22865 {
22866 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22867 - return copy_user_generic((__force void *)to, (__force void *)from, len);
22868 - }
22869 - return len;
22870 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22871 +
22872 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22873 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
22874 + to += PAX_USER_SHADOW_BASE;
22875 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
22876 + from += PAX_USER_SHADOW_BASE;
22877 +#endif
22878 +
22879 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
22880 + }
22881 + return len;
22882 }
22883 EXPORT_SYMBOL(copy_in_user);
22884
22885 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
22886 * it is not necessary to optimize tail handling.
22887 */
22888 unsigned long
22889 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
22890 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
22891 {
22892 char c;
22893 unsigned zero_len;
22894 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
22895 index d0474ad..36e9257 100644
22896 --- a/arch/x86/mm/extable.c
22897 +++ b/arch/x86/mm/extable.c
22898 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
22899 const struct exception_table_entry *fixup;
22900
22901 #ifdef CONFIG_PNPBIOS
22902 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
22903 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
22904 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
22905 extern u32 pnp_bios_is_utter_crap;
22906 pnp_bios_is_utter_crap = 1;
22907 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
22908 index 5db0490..2ddce45 100644
22909 --- a/arch/x86/mm/fault.c
22910 +++ b/arch/x86/mm/fault.c
22911 @@ -13,11 +13,18 @@
22912 #include <linux/perf_event.h> /* perf_sw_event */
22913 #include <linux/hugetlb.h> /* hstate_index_to_shift */
22914 #include <linux/prefetch.h> /* prefetchw */
22915 +#include <linux/unistd.h>
22916 +#include <linux/compiler.h>
22917
22918 #include <asm/traps.h> /* dotraplinkage, ... */
22919 #include <asm/pgalloc.h> /* pgd_*(), ... */
22920 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
22921 #include <asm/fixmap.h> /* VSYSCALL_START */
22922 +#include <asm/tlbflush.h>
22923 +
22924 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22925 +#include <asm/stacktrace.h>
22926 +#endif
22927
22928 /*
22929 * Page fault error code bits:
22930 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
22931 int ret = 0;
22932
22933 /* kprobe_running() needs smp_processor_id() */
22934 - if (kprobes_built_in() && !user_mode_vm(regs)) {
22935 + if (kprobes_built_in() && !user_mode(regs)) {
22936 preempt_disable();
22937 if (kprobe_running() && kprobe_fault_handler(regs, 14))
22938 ret = 1;
22939 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
22940 return !instr_lo || (instr_lo>>1) == 1;
22941 case 0x00:
22942 /* Prefetch instruction is 0x0F0D or 0x0F18 */
22943 - if (probe_kernel_address(instr, opcode))
22944 + if (user_mode(regs)) {
22945 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22946 + return 0;
22947 + } else if (probe_kernel_address(instr, opcode))
22948 return 0;
22949
22950 *prefetch = (instr_lo == 0xF) &&
22951 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
22952 while (instr < max_instr) {
22953 unsigned char opcode;
22954
22955 - if (probe_kernel_address(instr, opcode))
22956 + if (user_mode(regs)) {
22957 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22958 + break;
22959 + } else if (probe_kernel_address(instr, opcode))
22960 break;
22961
22962 instr++;
22963 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
22964 force_sig_info(si_signo, &info, tsk);
22965 }
22966
22967 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22968 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
22969 +#endif
22970 +
22971 +#ifdef CONFIG_PAX_EMUTRAMP
22972 +static int pax_handle_fetch_fault(struct pt_regs *regs);
22973 +#endif
22974 +
22975 +#ifdef CONFIG_PAX_PAGEEXEC
22976 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
22977 +{
22978 + pgd_t *pgd;
22979 + pud_t *pud;
22980 + pmd_t *pmd;
22981 +
22982 + pgd = pgd_offset(mm, address);
22983 + if (!pgd_present(*pgd))
22984 + return NULL;
22985 + pud = pud_offset(pgd, address);
22986 + if (!pud_present(*pud))
22987 + return NULL;
22988 + pmd = pmd_offset(pud, address);
22989 + if (!pmd_present(*pmd))
22990 + return NULL;
22991 + return pmd;
22992 +}
22993 +#endif
22994 +
22995 DEFINE_SPINLOCK(pgd_lock);
22996 LIST_HEAD(pgd_list);
22997
22998 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
22999 for (address = VMALLOC_START & PMD_MASK;
23000 address >= TASK_SIZE && address < FIXADDR_TOP;
23001 address += PMD_SIZE) {
23002 +
23003 +#ifdef CONFIG_PAX_PER_CPU_PGD
23004 + unsigned long cpu;
23005 +#else
23006 struct page *page;
23007 +#endif
23008
23009 spin_lock(&pgd_lock);
23010 +
23011 +#ifdef CONFIG_PAX_PER_CPU_PGD
23012 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23013 + pgd_t *pgd = get_cpu_pgd(cpu);
23014 + pmd_t *ret;
23015 +#else
23016 list_for_each_entry(page, &pgd_list, lru) {
23017 + pgd_t *pgd = page_address(page);
23018 spinlock_t *pgt_lock;
23019 pmd_t *ret;
23020
23021 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23022 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23023
23024 spin_lock(pgt_lock);
23025 - ret = vmalloc_sync_one(page_address(page), address);
23026 +#endif
23027 +
23028 + ret = vmalloc_sync_one(pgd, address);
23029 +
23030 +#ifndef CONFIG_PAX_PER_CPU_PGD
23031 spin_unlock(pgt_lock);
23032 +#endif
23033
23034 if (!ret)
23035 break;
23036 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23037 * an interrupt in the middle of a task switch..
23038 */
23039 pgd_paddr = read_cr3();
23040 +
23041 +#ifdef CONFIG_PAX_PER_CPU_PGD
23042 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23043 +#endif
23044 +
23045 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23046 if (!pmd_k)
23047 return -1;
23048 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23049 * happen within a race in page table update. In the later
23050 * case just flush:
23051 */
23052 +
23053 +#ifdef CONFIG_PAX_PER_CPU_PGD
23054 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23055 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23056 +#else
23057 pgd = pgd_offset(current->active_mm, address);
23058 +#endif
23059 +
23060 pgd_ref = pgd_offset_k(address);
23061 if (pgd_none(*pgd_ref))
23062 return -1;
23063 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23064 static int is_errata100(struct pt_regs *regs, unsigned long address)
23065 {
23066 #ifdef CONFIG_X86_64
23067 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23068 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23069 return 1;
23070 #endif
23071 return 0;
23072 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23073 }
23074
23075 static const char nx_warning[] = KERN_CRIT
23076 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23077 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23078
23079 static void
23080 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23081 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23082 if (!oops_may_print())
23083 return;
23084
23085 - if (error_code & PF_INSTR) {
23086 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23087 unsigned int level;
23088
23089 pte_t *pte = lookup_address(address, &level);
23090
23091 if (pte && pte_present(*pte) && !pte_exec(*pte))
23092 - printk(nx_warning, current_uid());
23093 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23094 }
23095
23096 +#ifdef CONFIG_PAX_KERNEXEC
23097 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23098 + if (current->signal->curr_ip)
23099 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23100 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23101 + else
23102 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23103 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23104 + }
23105 +#endif
23106 +
23107 printk(KERN_ALERT "BUG: unable to handle kernel ");
23108 if (address < PAGE_SIZE)
23109 printk(KERN_CONT "NULL pointer dereference");
23110 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23111 }
23112 #endif
23113
23114 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23115 + if (pax_is_fetch_fault(regs, error_code, address)) {
23116 +
23117 +#ifdef CONFIG_PAX_EMUTRAMP
23118 + switch (pax_handle_fetch_fault(regs)) {
23119 + case 2:
23120 + return;
23121 + }
23122 +#endif
23123 +
23124 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23125 + do_group_exit(SIGKILL);
23126 + }
23127 +#endif
23128 +
23129 if (unlikely(show_unhandled_signals))
23130 show_signal_msg(regs, error_code, address, tsk);
23131
23132 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23133 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23134 printk(KERN_ERR
23135 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23136 - tsk->comm, tsk->pid, address);
23137 + tsk->comm, task_pid_nr(tsk), address);
23138 code = BUS_MCEERR_AR;
23139 }
23140 #endif
23141 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23142 return 1;
23143 }
23144
23145 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23146 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23147 +{
23148 + pte_t *pte;
23149 + pmd_t *pmd;
23150 + spinlock_t *ptl;
23151 + unsigned char pte_mask;
23152 +
23153 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23154 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23155 + return 0;
23156 +
23157 + /* PaX: it's our fault, let's handle it if we can */
23158 +
23159 + /* PaX: take a look at read faults before acquiring any locks */
23160 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23161 + /* instruction fetch attempt from a protected page in user mode */
23162 + up_read(&mm->mmap_sem);
23163 +
23164 +#ifdef CONFIG_PAX_EMUTRAMP
23165 + switch (pax_handle_fetch_fault(regs)) {
23166 + case 2:
23167 + return 1;
23168 + }
23169 +#endif
23170 +
23171 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23172 + do_group_exit(SIGKILL);
23173 + }
23174 +
23175 + pmd = pax_get_pmd(mm, address);
23176 + if (unlikely(!pmd))
23177 + return 0;
23178 +
23179 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23180 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23181 + pte_unmap_unlock(pte, ptl);
23182 + return 0;
23183 + }
23184 +
23185 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23186 + /* write attempt to a protected page in user mode */
23187 + pte_unmap_unlock(pte, ptl);
23188 + return 0;
23189 + }
23190 +
23191 +#ifdef CONFIG_SMP
23192 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23193 +#else
23194 + if (likely(address > get_limit(regs->cs)))
23195 +#endif
23196 + {
23197 + set_pte(pte, pte_mkread(*pte));
23198 + __flush_tlb_one(address);
23199 + pte_unmap_unlock(pte, ptl);
23200 + up_read(&mm->mmap_sem);
23201 + return 1;
23202 + }
23203 +
23204 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23205 +
23206 + /*
23207 + * PaX: fill DTLB with user rights and retry
23208 + */
23209 + __asm__ __volatile__ (
23210 + "orb %2,(%1)\n"
23211 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23212 +/*
23213 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23214 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23215 + * page fault when examined during a TLB load attempt. this is true not only
23216 + * for PTEs holding a non-present entry but also present entries that will
23217 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23218 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23219 + * for our target pages since their PTEs are simply not in the TLBs at all.
23220 +
23221 + * the best thing in omitting it is that we gain around 15-20% speed in the
23222 + * fast path of the page fault handler and can get rid of tracing since we
23223 + * can no longer flush unintended entries.
23224 + */
23225 + "invlpg (%0)\n"
23226 +#endif
23227 + __copyuser_seg"testb $0,(%0)\n"
23228 + "xorb %3,(%1)\n"
23229 + :
23230 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23231 + : "memory", "cc");
23232 + pte_unmap_unlock(pte, ptl);
23233 + up_read(&mm->mmap_sem);
23234 + return 1;
23235 +}
23236 +#endif
23237 +
23238 /*
23239 * Handle a spurious fault caused by a stale TLB entry.
23240 *
23241 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
23242 static inline int
23243 access_error(unsigned long error_code, struct vm_area_struct *vma)
23244 {
23245 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23246 + return 1;
23247 +
23248 if (error_code & PF_WRITE) {
23249 /* write, present and write, not present: */
23250 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23251 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23252 {
23253 struct vm_area_struct *vma;
23254 struct task_struct *tsk;
23255 - unsigned long address;
23256 struct mm_struct *mm;
23257 int fault;
23258 int write = error_code & PF_WRITE;
23259 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23260 (write ? FAULT_FLAG_WRITE : 0);
23261
23262 - tsk = current;
23263 - mm = tsk->mm;
23264 -
23265 /* Get the faulting address: */
23266 - address = read_cr2();
23267 + unsigned long address = read_cr2();
23268 +
23269 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23270 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23271 + if (!search_exception_tables(regs->ip)) {
23272 + bad_area_nosemaphore(regs, error_code, address);
23273 + return;
23274 + }
23275 + if (address < PAX_USER_SHADOW_BASE) {
23276 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23277 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23278 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23279 + } else
23280 + address -= PAX_USER_SHADOW_BASE;
23281 + }
23282 +#endif
23283 +
23284 + tsk = current;
23285 + mm = tsk->mm;
23286
23287 /*
23288 * Detect and handle instructions that would cause a page fault for
23289 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23290 * User-mode registers count as a user access even for any
23291 * potential system fault or CPU buglet:
23292 */
23293 - if (user_mode_vm(regs)) {
23294 + if (user_mode(regs)) {
23295 local_irq_enable();
23296 error_code |= PF_USER;
23297 } else {
23298 @@ -1122,6 +1328,11 @@ retry:
23299 might_sleep();
23300 }
23301
23302 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23303 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23304 + return;
23305 +#endif
23306 +
23307 vma = find_vma(mm, address);
23308 if (unlikely(!vma)) {
23309 bad_area(regs, error_code, address);
23310 @@ -1133,18 +1344,24 @@ retry:
23311 bad_area(regs, error_code, address);
23312 return;
23313 }
23314 - if (error_code & PF_USER) {
23315 - /*
23316 - * Accessing the stack below %sp is always a bug.
23317 - * The large cushion allows instructions like enter
23318 - * and pusha to work. ("enter $65535, $31" pushes
23319 - * 32 pointers and then decrements %sp by 65535.)
23320 - */
23321 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23322 - bad_area(regs, error_code, address);
23323 - return;
23324 - }
23325 + /*
23326 + * Accessing the stack below %sp is always a bug.
23327 + * The large cushion allows instructions like enter
23328 + * and pusha to work. ("enter $65535, $31" pushes
23329 + * 32 pointers and then decrements %sp by 65535.)
23330 + */
23331 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23332 + bad_area(regs, error_code, address);
23333 + return;
23334 }
23335 +
23336 +#ifdef CONFIG_PAX_SEGMEXEC
23337 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23338 + bad_area(regs, error_code, address);
23339 + return;
23340 + }
23341 +#endif
23342 +
23343 if (unlikely(expand_stack(vma, address))) {
23344 bad_area(regs, error_code, address);
23345 return;
23346 @@ -1199,3 +1416,292 @@ good_area:
23347
23348 up_read(&mm->mmap_sem);
23349 }
23350 +
23351 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23352 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23353 +{
23354 + struct mm_struct *mm = current->mm;
23355 + unsigned long ip = regs->ip;
23356 +
23357 + if (v8086_mode(regs))
23358 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23359 +
23360 +#ifdef CONFIG_PAX_PAGEEXEC
23361 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23362 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23363 + return true;
23364 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23365 + return true;
23366 + return false;
23367 + }
23368 +#endif
23369 +
23370 +#ifdef CONFIG_PAX_SEGMEXEC
23371 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23372 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23373 + return true;
23374 + return false;
23375 + }
23376 +#endif
23377 +
23378 + return false;
23379 +}
23380 +#endif
23381 +
23382 +#ifdef CONFIG_PAX_EMUTRAMP
23383 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23384 +{
23385 + int err;
23386 +
23387 + do { /* PaX: libffi trampoline emulation */
23388 + unsigned char mov, jmp;
23389 + unsigned int addr1, addr2;
23390 +
23391 +#ifdef CONFIG_X86_64
23392 + if ((regs->ip + 9) >> 32)
23393 + break;
23394 +#endif
23395 +
23396 + err = get_user(mov, (unsigned char __user *)regs->ip);
23397 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23398 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23399 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23400 +
23401 + if (err)
23402 + break;
23403 +
23404 + if (mov == 0xB8 && jmp == 0xE9) {
23405 + regs->ax = addr1;
23406 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23407 + return 2;
23408 + }
23409 + } while (0);
23410 +
23411 + do { /* PaX: gcc trampoline emulation #1 */
23412 + unsigned char mov1, mov2;
23413 + unsigned short jmp;
23414 + unsigned int addr1, addr2;
23415 +
23416 +#ifdef CONFIG_X86_64
23417 + if ((regs->ip + 11) >> 32)
23418 + break;
23419 +#endif
23420 +
23421 + err = get_user(mov1, (unsigned char __user *)regs->ip);
23422 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23423 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
23424 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23425 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
23426 +
23427 + if (err)
23428 + break;
23429 +
23430 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
23431 + regs->cx = addr1;
23432 + regs->ax = addr2;
23433 + regs->ip = addr2;
23434 + return 2;
23435 + }
23436 + } while (0);
23437 +
23438 + do { /* PaX: gcc trampoline emulation #2 */
23439 + unsigned char mov, jmp;
23440 + unsigned int addr1, addr2;
23441 +
23442 +#ifdef CONFIG_X86_64
23443 + if ((regs->ip + 9) >> 32)
23444 + break;
23445 +#endif
23446 +
23447 + err = get_user(mov, (unsigned char __user *)regs->ip);
23448 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23449 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23450 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23451 +
23452 + if (err)
23453 + break;
23454 +
23455 + if (mov == 0xB9 && jmp == 0xE9) {
23456 + regs->cx = addr1;
23457 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23458 + return 2;
23459 + }
23460 + } while (0);
23461 +
23462 + return 1; /* PaX in action */
23463 +}
23464 +
23465 +#ifdef CONFIG_X86_64
23466 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
23467 +{
23468 + int err;
23469 +
23470 + do { /* PaX: libffi trampoline emulation */
23471 + unsigned short mov1, mov2, jmp1;
23472 + unsigned char stcclc, jmp2;
23473 + unsigned long addr1, addr2;
23474 +
23475 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23476 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23477 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23478 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23479 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
23480 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
23481 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
23482 +
23483 + if (err)
23484 + break;
23485 +
23486 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23487 + regs->r11 = addr1;
23488 + regs->r10 = addr2;
23489 + if (stcclc == 0xF8)
23490 + regs->flags &= ~X86_EFLAGS_CF;
23491 + else
23492 + regs->flags |= X86_EFLAGS_CF;
23493 + regs->ip = addr1;
23494 + return 2;
23495 + }
23496 + } while (0);
23497 +
23498 + do { /* PaX: gcc trampoline emulation #1 */
23499 + unsigned short mov1, mov2, jmp1;
23500 + unsigned char jmp2;
23501 + unsigned int addr1;
23502 + unsigned long addr2;
23503 +
23504 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23505 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
23506 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
23507 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
23508 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
23509 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
23510 +
23511 + if (err)
23512 + break;
23513 +
23514 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23515 + regs->r11 = addr1;
23516 + regs->r10 = addr2;
23517 + regs->ip = addr1;
23518 + return 2;
23519 + }
23520 + } while (0);
23521 +
23522 + do { /* PaX: gcc trampoline emulation #2 */
23523 + unsigned short mov1, mov2, jmp1;
23524 + unsigned char jmp2;
23525 + unsigned long addr1, addr2;
23526 +
23527 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23528 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23529 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23530 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23531 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
23532 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
23533 +
23534 + if (err)
23535 + break;
23536 +
23537 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23538 + regs->r11 = addr1;
23539 + regs->r10 = addr2;
23540 + regs->ip = addr1;
23541 + return 2;
23542 + }
23543 + } while (0);
23544 +
23545 + return 1; /* PaX in action */
23546 +}
23547 +#endif
23548 +
23549 +/*
23550 + * PaX: decide what to do with offenders (regs->ip = fault address)
23551 + *
23552 + * returns 1 when task should be killed
23553 + * 2 when gcc trampoline was detected
23554 + */
23555 +static int pax_handle_fetch_fault(struct pt_regs *regs)
23556 +{
23557 + if (v8086_mode(regs))
23558 + return 1;
23559 +
23560 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
23561 + return 1;
23562 +
23563 +#ifdef CONFIG_X86_32
23564 + return pax_handle_fetch_fault_32(regs);
23565 +#else
23566 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
23567 + return pax_handle_fetch_fault_32(regs);
23568 + else
23569 + return pax_handle_fetch_fault_64(regs);
23570 +#endif
23571 +}
23572 +#endif
23573 +
23574 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23575 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
23576 +{
23577 + long i;
23578 +
23579 + printk(KERN_ERR "PAX: bytes at PC: ");
23580 + for (i = 0; i < 20; i++) {
23581 + unsigned char c;
23582 + if (get_user(c, (unsigned char __force_user *)pc+i))
23583 + printk(KERN_CONT "?? ");
23584 + else
23585 + printk(KERN_CONT "%02x ", c);
23586 + }
23587 + printk("\n");
23588 +
23589 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
23590 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
23591 + unsigned long c;
23592 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
23593 +#ifdef CONFIG_X86_32
23594 + printk(KERN_CONT "???????? ");
23595 +#else
23596 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
23597 + printk(KERN_CONT "???????? ???????? ");
23598 + else
23599 + printk(KERN_CONT "???????????????? ");
23600 +#endif
23601 + } else {
23602 +#ifdef CONFIG_X86_64
23603 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
23604 + printk(KERN_CONT "%08x ", (unsigned int)c);
23605 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
23606 + } else
23607 +#endif
23608 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
23609 + }
23610 + }
23611 + printk("\n");
23612 +}
23613 +#endif
23614 +
23615 +/**
23616 + * probe_kernel_write(): safely attempt to write to a location
23617 + * @dst: address to write to
23618 + * @src: pointer to the data that shall be written
23619 + * @size: size of the data chunk
23620 + *
23621 + * Safely write to address @dst from the buffer at @src. If a kernel fault
23622 + * happens, handle that and return -EFAULT.
23623 + */
23624 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
23625 +{
23626 + long ret;
23627 + mm_segment_t old_fs = get_fs();
23628 +
23629 + set_fs(KERNEL_DS);
23630 + pagefault_disable();
23631 + pax_open_kernel();
23632 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
23633 + pax_close_kernel();
23634 + pagefault_enable();
23635 + set_fs(old_fs);
23636 +
23637 + return ret ? -EFAULT : 0;
23638 +}
23639 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
23640 index dd74e46..7d26398 100644
23641 --- a/arch/x86/mm/gup.c
23642 +++ b/arch/x86/mm/gup.c
23643 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
23644 addr = start;
23645 len = (unsigned long) nr_pages << PAGE_SHIFT;
23646 end = start + len;
23647 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23648 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23649 (void __user *)start, len)))
23650 return 0;
23651
23652 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
23653 index f4f29b1..5cac4fb 100644
23654 --- a/arch/x86/mm/highmem_32.c
23655 +++ b/arch/x86/mm/highmem_32.c
23656 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
23657 idx = type + KM_TYPE_NR*smp_processor_id();
23658 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23659 BUG_ON(!pte_none(*(kmap_pte-idx)));
23660 +
23661 + pax_open_kernel();
23662 set_pte(kmap_pte-idx, mk_pte(page, prot));
23663 + pax_close_kernel();
23664 +
23665 arch_flush_lazy_mmu_mode();
23666
23667 return (void *)vaddr;
23668 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
23669 index f581a18..29efd37 100644
23670 --- a/arch/x86/mm/hugetlbpage.c
23671 +++ b/arch/x86/mm/hugetlbpage.c
23672 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
23673 struct hstate *h = hstate_file(file);
23674 struct mm_struct *mm = current->mm;
23675 struct vm_area_struct *vma;
23676 - unsigned long start_addr;
23677 + unsigned long start_addr, pax_task_size = TASK_SIZE;
23678 +
23679 +#ifdef CONFIG_PAX_SEGMEXEC
23680 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23681 + pax_task_size = SEGMEXEC_TASK_SIZE;
23682 +#endif
23683 +
23684 + pax_task_size -= PAGE_SIZE;
23685
23686 if (len > mm->cached_hole_size) {
23687 - start_addr = mm->free_area_cache;
23688 + start_addr = mm->free_area_cache;
23689 } else {
23690 - start_addr = TASK_UNMAPPED_BASE;
23691 - mm->cached_hole_size = 0;
23692 + start_addr = mm->mmap_base;
23693 + mm->cached_hole_size = 0;
23694 }
23695
23696 full_search:
23697 @@ -280,26 +287,27 @@ full_search:
23698
23699 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23700 /* At this point: (!vma || addr < vma->vm_end). */
23701 - if (TASK_SIZE - len < addr) {
23702 + if (pax_task_size - len < addr) {
23703 /*
23704 * Start a new search - just in case we missed
23705 * some holes.
23706 */
23707 - if (start_addr != TASK_UNMAPPED_BASE) {
23708 - start_addr = TASK_UNMAPPED_BASE;
23709 + if (start_addr != mm->mmap_base) {
23710 + start_addr = mm->mmap_base;
23711 mm->cached_hole_size = 0;
23712 goto full_search;
23713 }
23714 return -ENOMEM;
23715 }
23716 - if (!vma || addr + len <= vma->vm_start) {
23717 - mm->free_area_cache = addr + len;
23718 - return addr;
23719 - }
23720 + if (check_heap_stack_gap(vma, addr, len))
23721 + break;
23722 if (addr + mm->cached_hole_size < vma->vm_start)
23723 mm->cached_hole_size = vma->vm_start - addr;
23724 addr = ALIGN(vma->vm_end, huge_page_size(h));
23725 }
23726 +
23727 + mm->free_area_cache = addr + len;
23728 + return addr;
23729 }
23730
23731 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23732 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23733 {
23734 struct hstate *h = hstate_file(file);
23735 struct mm_struct *mm = current->mm;
23736 - struct vm_area_struct *vma, *prev_vma;
23737 - unsigned long base = mm->mmap_base, addr = addr0;
23738 + struct vm_area_struct *vma;
23739 + unsigned long base = mm->mmap_base, addr;
23740 unsigned long largest_hole = mm->cached_hole_size;
23741 - int first_time = 1;
23742
23743 /* don't allow allocations above current base */
23744 if (mm->free_area_cache > base)
23745 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23746 largest_hole = 0;
23747 mm->free_area_cache = base;
23748 }
23749 -try_again:
23750 +
23751 /* make sure it can fit in the remaining address space */
23752 if (mm->free_area_cache < len)
23753 goto fail;
23754
23755 /* either no address requested or can't fit in requested address hole */
23756 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
23757 + addr = (mm->free_area_cache - len);
23758 do {
23759 + addr &= huge_page_mask(h);
23760 + vma = find_vma(mm, addr);
23761 /*
23762 * Lookup failure means no vma is above this address,
23763 * i.e. return with success:
23764 - */
23765 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
23766 - return addr;
23767 -
23768 - /*
23769 * new region fits between prev_vma->vm_end and
23770 * vma->vm_start, use it:
23771 */
23772 - if (addr + len <= vma->vm_start &&
23773 - (!prev_vma || (addr >= prev_vma->vm_end))) {
23774 + if (check_heap_stack_gap(vma, addr, len)) {
23775 /* remember the address as a hint for next time */
23776 - mm->cached_hole_size = largest_hole;
23777 - return (mm->free_area_cache = addr);
23778 - } else {
23779 - /* pull free_area_cache down to the first hole */
23780 - if (mm->free_area_cache == vma->vm_end) {
23781 - mm->free_area_cache = vma->vm_start;
23782 - mm->cached_hole_size = largest_hole;
23783 - }
23784 + mm->cached_hole_size = largest_hole;
23785 + return (mm->free_area_cache = addr);
23786 + }
23787 + /* pull free_area_cache down to the first hole */
23788 + if (mm->free_area_cache == vma->vm_end) {
23789 + mm->free_area_cache = vma->vm_start;
23790 + mm->cached_hole_size = largest_hole;
23791 }
23792
23793 /* remember the largest hole we saw so far */
23794 if (addr + largest_hole < vma->vm_start)
23795 - largest_hole = vma->vm_start - addr;
23796 + largest_hole = vma->vm_start - addr;
23797
23798 /* try just below the current vma->vm_start */
23799 - addr = (vma->vm_start - len) & huge_page_mask(h);
23800 - } while (len <= vma->vm_start);
23801 + addr = skip_heap_stack_gap(vma, len);
23802 + } while (!IS_ERR_VALUE(addr));
23803
23804 fail:
23805 /*
23806 - * if hint left us with no space for the requested
23807 - * mapping then try again:
23808 - */
23809 - if (first_time) {
23810 - mm->free_area_cache = base;
23811 - largest_hole = 0;
23812 - first_time = 0;
23813 - goto try_again;
23814 - }
23815 - /*
23816 * A failed mmap() very likely causes application failure,
23817 * so fall back to the bottom-up function here. This scenario
23818 * can happen with large stack limits and large mmap()
23819 * allocations.
23820 */
23821 - mm->free_area_cache = TASK_UNMAPPED_BASE;
23822 +
23823 +#ifdef CONFIG_PAX_SEGMEXEC
23824 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23825 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23826 + else
23827 +#endif
23828 +
23829 + mm->mmap_base = TASK_UNMAPPED_BASE;
23830 +
23831 +#ifdef CONFIG_PAX_RANDMMAP
23832 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23833 + mm->mmap_base += mm->delta_mmap;
23834 +#endif
23835 +
23836 + mm->free_area_cache = mm->mmap_base;
23837 mm->cached_hole_size = ~0UL;
23838 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
23839 len, pgoff, flags);
23840 @@ -386,6 +392,7 @@ fail:
23841 /*
23842 * Restore the topdown base:
23843 */
23844 + mm->mmap_base = base;
23845 mm->free_area_cache = base;
23846 mm->cached_hole_size = ~0UL;
23847
23848 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23849 struct hstate *h = hstate_file(file);
23850 struct mm_struct *mm = current->mm;
23851 struct vm_area_struct *vma;
23852 + unsigned long pax_task_size = TASK_SIZE;
23853
23854 if (len & ~huge_page_mask(h))
23855 return -EINVAL;
23856 - if (len > TASK_SIZE)
23857 +
23858 +#ifdef CONFIG_PAX_SEGMEXEC
23859 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23860 + pax_task_size = SEGMEXEC_TASK_SIZE;
23861 +#endif
23862 +
23863 + pax_task_size -= PAGE_SIZE;
23864 +
23865 + if (len > pax_task_size)
23866 return -ENOMEM;
23867
23868 if (flags & MAP_FIXED) {
23869 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23870 if (addr) {
23871 addr = ALIGN(addr, huge_page_size(h));
23872 vma = find_vma(mm, addr);
23873 - if (TASK_SIZE - len >= addr &&
23874 - (!vma || addr + len <= vma->vm_start))
23875 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
23876 return addr;
23877 }
23878 if (mm->get_unmapped_area == arch_get_unmapped_area)
23879 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
23880 index 87488b9..399f416 100644
23881 --- a/arch/x86/mm/init.c
23882 +++ b/arch/x86/mm/init.c
23883 @@ -15,6 +15,7 @@
23884 #include <asm/tlbflush.h>
23885 #include <asm/tlb.h>
23886 #include <asm/proto.h>
23887 +#include <asm/desc.h>
23888
23889 unsigned long __initdata pgt_buf_start;
23890 unsigned long __meminitdata pgt_buf_end;
23891 @@ -31,7 +32,7 @@ int direct_gbpages
23892 static void __init find_early_table_space(unsigned long end, int use_pse,
23893 int use_gbpages)
23894 {
23895 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
23896 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
23897 phys_addr_t base;
23898
23899 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
23900 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
23901 */
23902 int devmem_is_allowed(unsigned long pagenr)
23903 {
23904 +#ifdef CONFIG_GRKERNSEC_KMEM
23905 + /* allow BDA */
23906 + if (!pagenr)
23907 + return 1;
23908 + /* allow EBDA */
23909 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
23910 + return 1;
23911 +#else
23912 + if (!pagenr)
23913 + return 1;
23914 +#ifdef CONFIG_VM86
23915 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
23916 + return 1;
23917 +#endif
23918 +#endif
23919 +
23920 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
23921 + return 1;
23922 +#ifdef CONFIG_GRKERNSEC_KMEM
23923 + /* throw out everything else below 1MB */
23924 if (pagenr <= 256)
23925 - return 1;
23926 + return 0;
23927 +#endif
23928 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
23929 return 0;
23930 if (!page_is_ram(pagenr))
23931 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
23932
23933 void free_initmem(void)
23934 {
23935 +
23936 +#ifdef CONFIG_PAX_KERNEXEC
23937 +#ifdef CONFIG_X86_32
23938 + /* PaX: limit KERNEL_CS to actual size */
23939 + unsigned long addr, limit;
23940 + struct desc_struct d;
23941 + int cpu;
23942 +
23943 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
23944 + limit = (limit - 1UL) >> PAGE_SHIFT;
23945 +
23946 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
23947 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
23948 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
23949 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
23950 + }
23951 +
23952 + /* PaX: make KERNEL_CS read-only */
23953 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
23954 + if (!paravirt_enabled())
23955 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
23956 +/*
23957 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
23958 + pgd = pgd_offset_k(addr);
23959 + pud = pud_offset(pgd, addr);
23960 + pmd = pmd_offset(pud, addr);
23961 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23962 + }
23963 +*/
23964 +#ifdef CONFIG_X86_PAE
23965 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
23966 +/*
23967 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
23968 + pgd = pgd_offset_k(addr);
23969 + pud = pud_offset(pgd, addr);
23970 + pmd = pmd_offset(pud, addr);
23971 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23972 + }
23973 +*/
23974 +#endif
23975 +
23976 +#ifdef CONFIG_MODULES
23977 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
23978 +#endif
23979 +
23980 +#else
23981 + pgd_t *pgd;
23982 + pud_t *pud;
23983 + pmd_t *pmd;
23984 + unsigned long addr, end;
23985 +
23986 + /* PaX: make kernel code/rodata read-only, rest non-executable */
23987 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
23988 + pgd = pgd_offset_k(addr);
23989 + pud = pud_offset(pgd, addr);
23990 + pmd = pmd_offset(pud, addr);
23991 + if (!pmd_present(*pmd))
23992 + continue;
23993 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
23994 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23995 + else
23996 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23997 + }
23998 +
23999 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24000 + end = addr + KERNEL_IMAGE_SIZE;
24001 + for (; addr < end; addr += PMD_SIZE) {
24002 + pgd = pgd_offset_k(addr);
24003 + pud = pud_offset(pgd, addr);
24004 + pmd = pmd_offset(pud, addr);
24005 + if (!pmd_present(*pmd))
24006 + continue;
24007 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24008 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24009 + }
24010 +#endif
24011 +
24012 + flush_tlb_all();
24013 +#endif
24014 +
24015 free_init_pages("unused kernel memory",
24016 (unsigned long)(&__init_begin),
24017 (unsigned long)(&__init_end));
24018 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24019 index 29f7c6d..b46b35b 100644
24020 --- a/arch/x86/mm/init_32.c
24021 +++ b/arch/x86/mm/init_32.c
24022 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
24023 }
24024
24025 /*
24026 - * Creates a middle page table and puts a pointer to it in the
24027 - * given global directory entry. This only returns the gd entry
24028 - * in non-PAE compilation mode, since the middle layer is folded.
24029 - */
24030 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24031 -{
24032 - pud_t *pud;
24033 - pmd_t *pmd_table;
24034 -
24035 -#ifdef CONFIG_X86_PAE
24036 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24037 - if (after_bootmem)
24038 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24039 - else
24040 - pmd_table = (pmd_t *)alloc_low_page();
24041 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24042 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24043 - pud = pud_offset(pgd, 0);
24044 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24045 -
24046 - return pmd_table;
24047 - }
24048 -#endif
24049 - pud = pud_offset(pgd, 0);
24050 - pmd_table = pmd_offset(pud, 0);
24051 -
24052 - return pmd_table;
24053 -}
24054 -
24055 -/*
24056 * Create a page table and place a pointer to it in a middle page
24057 * directory entry:
24058 */
24059 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24060 page_table = (pte_t *)alloc_low_page();
24061
24062 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24063 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24064 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24065 +#else
24066 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24067 +#endif
24068 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24069 }
24070
24071 return pte_offset_kernel(pmd, 0);
24072 }
24073
24074 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24075 +{
24076 + pud_t *pud;
24077 + pmd_t *pmd_table;
24078 +
24079 + pud = pud_offset(pgd, 0);
24080 + pmd_table = pmd_offset(pud, 0);
24081 +
24082 + return pmd_table;
24083 +}
24084 +
24085 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24086 {
24087 int pgd_idx = pgd_index(vaddr);
24088 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24089 int pgd_idx, pmd_idx;
24090 unsigned long vaddr;
24091 pgd_t *pgd;
24092 + pud_t *pud;
24093 pmd_t *pmd;
24094 pte_t *pte = NULL;
24095
24096 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24097 pgd = pgd_base + pgd_idx;
24098
24099 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24100 - pmd = one_md_table_init(pgd);
24101 - pmd = pmd + pmd_index(vaddr);
24102 + pud = pud_offset(pgd, vaddr);
24103 + pmd = pmd_offset(pud, vaddr);
24104 +
24105 +#ifdef CONFIG_X86_PAE
24106 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24107 +#endif
24108 +
24109 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24110 pmd++, pmd_idx++) {
24111 pte = page_table_kmap_check(one_page_table_init(pmd),
24112 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24113 }
24114 }
24115
24116 -static inline int is_kernel_text(unsigned long addr)
24117 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24118 {
24119 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24120 - return 1;
24121 - return 0;
24122 + if ((start > ktla_ktva((unsigned long)_etext) ||
24123 + end <= ktla_ktva((unsigned long)_stext)) &&
24124 + (start > ktla_ktva((unsigned long)_einittext) ||
24125 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24126 +
24127 +#ifdef CONFIG_ACPI_SLEEP
24128 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24129 +#endif
24130 +
24131 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24132 + return 0;
24133 + return 1;
24134 }
24135
24136 /*
24137 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24138 unsigned long last_map_addr = end;
24139 unsigned long start_pfn, end_pfn;
24140 pgd_t *pgd_base = swapper_pg_dir;
24141 - int pgd_idx, pmd_idx, pte_ofs;
24142 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24143 unsigned long pfn;
24144 pgd_t *pgd;
24145 + pud_t *pud;
24146 pmd_t *pmd;
24147 pte_t *pte;
24148 unsigned pages_2m, pages_4k;
24149 @@ -281,8 +282,13 @@ repeat:
24150 pfn = start_pfn;
24151 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24152 pgd = pgd_base + pgd_idx;
24153 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24154 - pmd = one_md_table_init(pgd);
24155 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24156 + pud = pud_offset(pgd, 0);
24157 + pmd = pmd_offset(pud, 0);
24158 +
24159 +#ifdef CONFIG_X86_PAE
24160 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24161 +#endif
24162
24163 if (pfn >= end_pfn)
24164 continue;
24165 @@ -294,14 +300,13 @@ repeat:
24166 #endif
24167 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24168 pmd++, pmd_idx++) {
24169 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24170 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24171
24172 /*
24173 * Map with big pages if possible, otherwise
24174 * create normal page tables:
24175 */
24176 if (use_pse) {
24177 - unsigned int addr2;
24178 pgprot_t prot = PAGE_KERNEL_LARGE;
24179 /*
24180 * first pass will use the same initial
24181 @@ -311,11 +316,7 @@ repeat:
24182 __pgprot(PTE_IDENT_ATTR |
24183 _PAGE_PSE);
24184
24185 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24186 - PAGE_OFFSET + PAGE_SIZE-1;
24187 -
24188 - if (is_kernel_text(addr) ||
24189 - is_kernel_text(addr2))
24190 + if (is_kernel_text(address, address + PMD_SIZE))
24191 prot = PAGE_KERNEL_LARGE_EXEC;
24192
24193 pages_2m++;
24194 @@ -332,7 +333,7 @@ repeat:
24195 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24196 pte += pte_ofs;
24197 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24198 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24199 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24200 pgprot_t prot = PAGE_KERNEL;
24201 /*
24202 * first pass will use the same initial
24203 @@ -340,7 +341,7 @@ repeat:
24204 */
24205 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24206
24207 - if (is_kernel_text(addr))
24208 + if (is_kernel_text(address, address + PAGE_SIZE))
24209 prot = PAGE_KERNEL_EXEC;
24210
24211 pages_4k++;
24212 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24213
24214 pud = pud_offset(pgd, va);
24215 pmd = pmd_offset(pud, va);
24216 - if (!pmd_present(*pmd))
24217 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24218 break;
24219
24220 pte = pte_offset_kernel(pmd, va);
24221 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
24222
24223 static void __init pagetable_init(void)
24224 {
24225 - pgd_t *pgd_base = swapper_pg_dir;
24226 -
24227 - permanent_kmaps_init(pgd_base);
24228 + permanent_kmaps_init(swapper_pg_dir);
24229 }
24230
24231 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24232 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24233 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24234
24235 /* user-defined highmem size */
24236 @@ -757,6 +756,12 @@ void __init mem_init(void)
24237
24238 pci_iommu_alloc();
24239
24240 +#ifdef CONFIG_PAX_PER_CPU_PGD
24241 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24242 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24243 + KERNEL_PGD_PTRS);
24244 +#endif
24245 +
24246 #ifdef CONFIG_FLATMEM
24247 BUG_ON(!mem_map);
24248 #endif
24249 @@ -774,7 +779,7 @@ void __init mem_init(void)
24250 set_highmem_pages_init();
24251
24252 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24253 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24254 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24255 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24256
24257 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24258 @@ -815,10 +820,10 @@ void __init mem_init(void)
24259 ((unsigned long)&__init_end -
24260 (unsigned long)&__init_begin) >> 10,
24261
24262 - (unsigned long)&_etext, (unsigned long)&_edata,
24263 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24264 + (unsigned long)&_sdata, (unsigned long)&_edata,
24265 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24266
24267 - (unsigned long)&_text, (unsigned long)&_etext,
24268 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24269 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24270
24271 /*
24272 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
24273 if (!kernel_set_to_readonly)
24274 return;
24275
24276 + start = ktla_ktva(start);
24277 pr_debug("Set kernel text: %lx - %lx for read write\n",
24278 start, start+size);
24279
24280 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
24281 if (!kernel_set_to_readonly)
24282 return;
24283
24284 + start = ktla_ktva(start);
24285 pr_debug("Set kernel text: %lx - %lx for read only\n",
24286 start, start+size);
24287
24288 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
24289 unsigned long start = PFN_ALIGN(_text);
24290 unsigned long size = PFN_ALIGN(_etext) - start;
24291
24292 + start = ktla_ktva(start);
24293 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24294 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24295 size >> 10);
24296 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24297 index bbaaa00..796fa65 100644
24298 --- a/arch/x86/mm/init_64.c
24299 +++ b/arch/x86/mm/init_64.c
24300 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24301 * around without checking the pgd every time.
24302 */
24303
24304 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24305 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24306 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24307
24308 int force_personality32;
24309 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24310
24311 for (address = start; address <= end; address += PGDIR_SIZE) {
24312 const pgd_t *pgd_ref = pgd_offset_k(address);
24313 +
24314 +#ifdef CONFIG_PAX_PER_CPU_PGD
24315 + unsigned long cpu;
24316 +#else
24317 struct page *page;
24318 +#endif
24319
24320 if (pgd_none(*pgd_ref))
24321 continue;
24322
24323 spin_lock(&pgd_lock);
24324 +
24325 +#ifdef CONFIG_PAX_PER_CPU_PGD
24326 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24327 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24328 +#else
24329 list_for_each_entry(page, &pgd_list, lru) {
24330 pgd_t *pgd;
24331 spinlock_t *pgt_lock;
24332 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24333 /* the pgt_lock only for Xen */
24334 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24335 spin_lock(pgt_lock);
24336 +#endif
24337
24338 if (pgd_none(*pgd))
24339 set_pgd(pgd, *pgd_ref);
24340 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24341 BUG_ON(pgd_page_vaddr(*pgd)
24342 != pgd_page_vaddr(*pgd_ref));
24343
24344 +#ifndef CONFIG_PAX_PER_CPU_PGD
24345 spin_unlock(pgt_lock);
24346 +#endif
24347 +
24348 }
24349 spin_unlock(&pgd_lock);
24350 }
24351 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24352 pmd = fill_pmd(pud, vaddr);
24353 pte = fill_pte(pmd, vaddr);
24354
24355 + pax_open_kernel();
24356 set_pte(pte, new_pte);
24357 + pax_close_kernel();
24358
24359 /*
24360 * It's enough to flush this one mapping.
24361 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24362 pgd = pgd_offset_k((unsigned long)__va(phys));
24363 if (pgd_none(*pgd)) {
24364 pud = (pud_t *) spp_getpage();
24365 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24366 - _PAGE_USER));
24367 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24368 }
24369 pud = pud_offset(pgd, (unsigned long)__va(phys));
24370 if (pud_none(*pud)) {
24371 pmd = (pmd_t *) spp_getpage();
24372 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24373 - _PAGE_USER));
24374 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24375 }
24376 pmd = pmd_offset(pud, phys);
24377 BUG_ON(!pmd_none(*pmd));
24378 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
24379 if (pfn >= pgt_buf_top)
24380 panic("alloc_low_page: ran out of memory");
24381
24382 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24383 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24384 clear_page(adr);
24385 *phys = pfn * PAGE_SIZE;
24386 return adr;
24387 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
24388
24389 phys = __pa(virt);
24390 left = phys & (PAGE_SIZE - 1);
24391 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24392 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24393 adr = (void *)(((unsigned long)adr) | left);
24394
24395 return adr;
24396 @@ -693,6 +707,12 @@ void __init mem_init(void)
24397
24398 pci_iommu_alloc();
24399
24400 +#ifdef CONFIG_PAX_PER_CPU_PGD
24401 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24402 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24403 + KERNEL_PGD_PTRS);
24404 +#endif
24405 +
24406 /* clear_bss() already clear the empty_zero_page */
24407
24408 reservedpages = 0;
24409 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
24410 static struct vm_area_struct gate_vma = {
24411 .vm_start = VSYSCALL_START,
24412 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
24413 - .vm_page_prot = PAGE_READONLY_EXEC,
24414 - .vm_flags = VM_READ | VM_EXEC
24415 + .vm_page_prot = PAGE_READONLY,
24416 + .vm_flags = VM_READ
24417 };
24418
24419 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24420 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
24421
24422 const char *arch_vma_name(struct vm_area_struct *vma)
24423 {
24424 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24425 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24426 return "[vdso]";
24427 if (vma == &gate_vma)
24428 return "[vsyscall]";
24429 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
24430 index 7b179b4..6bd1777 100644
24431 --- a/arch/x86/mm/iomap_32.c
24432 +++ b/arch/x86/mm/iomap_32.c
24433 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
24434 type = kmap_atomic_idx_push();
24435 idx = type + KM_TYPE_NR * smp_processor_id();
24436 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24437 +
24438 + pax_open_kernel();
24439 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
24440 + pax_close_kernel();
24441 +
24442 arch_flush_lazy_mmu_mode();
24443
24444 return (void *)vaddr;
24445 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
24446 index be1ef57..55f0160 100644
24447 --- a/arch/x86/mm/ioremap.c
24448 +++ b/arch/x86/mm/ioremap.c
24449 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
24450 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
24451 int is_ram = page_is_ram(pfn);
24452
24453 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
24454 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
24455 return NULL;
24456 WARN_ON_ONCE(is_ram);
24457 }
24458 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
24459
24460 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
24461 if (page_is_ram(start >> PAGE_SHIFT))
24462 +#ifdef CONFIG_HIGHMEM
24463 + if ((start >> PAGE_SHIFT) < max_low_pfn)
24464 +#endif
24465 return __va(phys);
24466
24467 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
24468 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
24469 early_param("early_ioremap_debug", early_ioremap_debug_setup);
24470
24471 static __initdata int after_paging_init;
24472 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
24473 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
24474
24475 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
24476 {
24477 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
24478 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
24479
24480 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
24481 - memset(bm_pte, 0, sizeof(bm_pte));
24482 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
24483 + pmd_populate_user(&init_mm, pmd, bm_pte);
24484
24485 /*
24486 * The boot-ioremap range spans multiple pmds, for which
24487 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
24488 index d87dd6d..bf3fa66 100644
24489 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
24490 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
24491 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
24492 * memory (e.g. tracked pages)? For now, we need this to avoid
24493 * invoking kmemcheck for PnP BIOS calls.
24494 */
24495 - if (regs->flags & X86_VM_MASK)
24496 + if (v8086_mode(regs))
24497 return false;
24498 - if (regs->cs != __KERNEL_CS)
24499 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
24500 return false;
24501
24502 pte = kmemcheck_pte_lookup(address);
24503 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
24504 index 845df68..1d8d29f 100644
24505 --- a/arch/x86/mm/mmap.c
24506 +++ b/arch/x86/mm/mmap.c
24507 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
24508 * Leave an at least ~128 MB hole with possible stack randomization.
24509 */
24510 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
24511 -#define MAX_GAP (TASK_SIZE/6*5)
24512 +#define MAX_GAP (pax_task_size/6*5)
24513
24514 static int mmap_is_legacy(void)
24515 {
24516 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
24517 return rnd << PAGE_SHIFT;
24518 }
24519
24520 -static unsigned long mmap_base(void)
24521 +static unsigned long mmap_base(struct mm_struct *mm)
24522 {
24523 unsigned long gap = rlimit(RLIMIT_STACK);
24524 + unsigned long pax_task_size = TASK_SIZE;
24525 +
24526 +#ifdef CONFIG_PAX_SEGMEXEC
24527 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24528 + pax_task_size = SEGMEXEC_TASK_SIZE;
24529 +#endif
24530
24531 if (gap < MIN_GAP)
24532 gap = MIN_GAP;
24533 else if (gap > MAX_GAP)
24534 gap = MAX_GAP;
24535
24536 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
24537 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
24538 }
24539
24540 /*
24541 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
24542 * does, but not when emulating X86_32
24543 */
24544 -static unsigned long mmap_legacy_base(void)
24545 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
24546 {
24547 - if (mmap_is_ia32())
24548 + if (mmap_is_ia32()) {
24549 +
24550 +#ifdef CONFIG_PAX_SEGMEXEC
24551 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24552 + return SEGMEXEC_TASK_UNMAPPED_BASE;
24553 + else
24554 +#endif
24555 +
24556 return TASK_UNMAPPED_BASE;
24557 - else
24558 + } else
24559 return TASK_UNMAPPED_BASE + mmap_rnd();
24560 }
24561
24562 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
24563 void arch_pick_mmap_layout(struct mm_struct *mm)
24564 {
24565 if (mmap_is_legacy()) {
24566 - mm->mmap_base = mmap_legacy_base();
24567 + mm->mmap_base = mmap_legacy_base(mm);
24568 +
24569 +#ifdef CONFIG_PAX_RANDMMAP
24570 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24571 + mm->mmap_base += mm->delta_mmap;
24572 +#endif
24573 +
24574 mm->get_unmapped_area = arch_get_unmapped_area;
24575 mm->unmap_area = arch_unmap_area;
24576 } else {
24577 - mm->mmap_base = mmap_base();
24578 + mm->mmap_base = mmap_base(mm);
24579 +
24580 +#ifdef CONFIG_PAX_RANDMMAP
24581 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24582 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
24583 +#endif
24584 +
24585 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
24586 mm->unmap_area = arch_unmap_area_topdown;
24587 }
24588 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
24589 index de54b9b..799051e 100644
24590 --- a/arch/x86/mm/mmio-mod.c
24591 +++ b/arch/x86/mm/mmio-mod.c
24592 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
24593 break;
24594 default:
24595 {
24596 - unsigned char *ip = (unsigned char *)instptr;
24597 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
24598 my_trace->opcode = MMIO_UNKNOWN_OP;
24599 my_trace->width = 0;
24600 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
24601 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
24602 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24603 void __iomem *addr)
24604 {
24605 - static atomic_t next_id;
24606 + static atomic_unchecked_t next_id;
24607 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
24608 /* These are page-unaligned. */
24609 struct mmiotrace_map map = {
24610 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24611 .private = trace
24612 },
24613 .phys = offset,
24614 - .id = atomic_inc_return(&next_id)
24615 + .id = atomic_inc_return_unchecked(&next_id)
24616 };
24617 map.map_id = trace->id;
24618
24619 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
24620 index b008656..773eac2 100644
24621 --- a/arch/x86/mm/pageattr-test.c
24622 +++ b/arch/x86/mm/pageattr-test.c
24623 @@ -36,7 +36,7 @@ enum {
24624
24625 static int pte_testbit(pte_t pte)
24626 {
24627 - return pte_flags(pte) & _PAGE_UNUSED1;
24628 + return pte_flags(pte) & _PAGE_CPA_TEST;
24629 }
24630
24631 struct split_state {
24632 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
24633 index f9e5267..77b1a40 100644
24634 --- a/arch/x86/mm/pageattr.c
24635 +++ b/arch/x86/mm/pageattr.c
24636 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24637 */
24638 #ifdef CONFIG_PCI_BIOS
24639 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
24640 - pgprot_val(forbidden) |= _PAGE_NX;
24641 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24642 #endif
24643
24644 /*
24645 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24646 * Does not cover __inittext since that is gone later on. On
24647 * 64bit we do not enforce !NX on the low mapping
24648 */
24649 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
24650 - pgprot_val(forbidden) |= _PAGE_NX;
24651 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
24652 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24653
24654 +#ifdef CONFIG_DEBUG_RODATA
24655 /*
24656 * The .rodata section needs to be read-only. Using the pfn
24657 * catches all aliases.
24658 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24659 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
24660 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
24661 pgprot_val(forbidden) |= _PAGE_RW;
24662 +#endif
24663
24664 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
24665 /*
24666 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24667 }
24668 #endif
24669
24670 +#ifdef CONFIG_PAX_KERNEXEC
24671 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
24672 + pgprot_val(forbidden) |= _PAGE_RW;
24673 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24674 + }
24675 +#endif
24676 +
24677 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
24678
24679 return prot;
24680 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
24681 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
24682 {
24683 /* change init_mm */
24684 + pax_open_kernel();
24685 set_pte_atomic(kpte, pte);
24686 +
24687 #ifdef CONFIG_X86_32
24688 if (!SHARED_KERNEL_PMD) {
24689 +
24690 +#ifdef CONFIG_PAX_PER_CPU_PGD
24691 + unsigned long cpu;
24692 +#else
24693 struct page *page;
24694 +#endif
24695
24696 +#ifdef CONFIG_PAX_PER_CPU_PGD
24697 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24698 + pgd_t *pgd = get_cpu_pgd(cpu);
24699 +#else
24700 list_for_each_entry(page, &pgd_list, lru) {
24701 - pgd_t *pgd;
24702 + pgd_t *pgd = (pgd_t *)page_address(page);
24703 +#endif
24704 +
24705 pud_t *pud;
24706 pmd_t *pmd;
24707
24708 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
24709 + pgd += pgd_index(address);
24710 pud = pud_offset(pgd, address);
24711 pmd = pmd_offset(pud, address);
24712 set_pte_atomic((pte_t *)pmd, pte);
24713 }
24714 }
24715 #endif
24716 + pax_close_kernel();
24717 }
24718
24719 static int
24720 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
24721 index f6ff57b..481690f 100644
24722 --- a/arch/x86/mm/pat.c
24723 +++ b/arch/x86/mm/pat.c
24724 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
24725
24726 if (!entry) {
24727 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
24728 - current->comm, current->pid, start, end);
24729 + current->comm, task_pid_nr(current), start, end);
24730 return -EINVAL;
24731 }
24732
24733 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24734 while (cursor < to) {
24735 if (!devmem_is_allowed(pfn)) {
24736 printk(KERN_INFO
24737 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24738 - current->comm, from, to);
24739 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
24740 + current->comm, from, to, cursor);
24741 return 0;
24742 }
24743 cursor += PAGE_SIZE;
24744 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
24745 printk(KERN_INFO
24746 "%s:%d ioremap_change_attr failed %s "
24747 "for %Lx-%Lx\n",
24748 - current->comm, current->pid,
24749 + current->comm, task_pid_nr(current),
24750 cattr_name(flags),
24751 base, (unsigned long long)(base + size));
24752 return -EINVAL;
24753 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24754 if (want_flags != flags) {
24755 printk(KERN_WARNING
24756 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
24757 - current->comm, current->pid,
24758 + current->comm, task_pid_nr(current),
24759 cattr_name(want_flags),
24760 (unsigned long long)paddr,
24761 (unsigned long long)(paddr + size),
24762 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24763 free_memtype(paddr, paddr + size);
24764 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
24765 " for %Lx-%Lx, got %s\n",
24766 - current->comm, current->pid,
24767 + current->comm, task_pid_nr(current),
24768 cattr_name(want_flags),
24769 (unsigned long long)paddr,
24770 (unsigned long long)(paddr + size),
24771 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
24772 index 9f0614d..92ae64a 100644
24773 --- a/arch/x86/mm/pf_in.c
24774 +++ b/arch/x86/mm/pf_in.c
24775 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
24776 int i;
24777 enum reason_type rv = OTHERS;
24778
24779 - p = (unsigned char *)ins_addr;
24780 + p = (unsigned char *)ktla_ktva(ins_addr);
24781 p += skip_prefix(p, &prf);
24782 p += get_opcode(p, &opcode);
24783
24784 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
24785 struct prefix_bits prf;
24786 int i;
24787
24788 - p = (unsigned char *)ins_addr;
24789 + p = (unsigned char *)ktla_ktva(ins_addr);
24790 p += skip_prefix(p, &prf);
24791 p += get_opcode(p, &opcode);
24792
24793 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
24794 struct prefix_bits prf;
24795 int i;
24796
24797 - p = (unsigned char *)ins_addr;
24798 + p = (unsigned char *)ktla_ktva(ins_addr);
24799 p += skip_prefix(p, &prf);
24800 p += get_opcode(p, &opcode);
24801
24802 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
24803 struct prefix_bits prf;
24804 int i;
24805
24806 - p = (unsigned char *)ins_addr;
24807 + p = (unsigned char *)ktla_ktva(ins_addr);
24808 p += skip_prefix(p, &prf);
24809 p += get_opcode(p, &opcode);
24810 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
24811 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
24812 struct prefix_bits prf;
24813 int i;
24814
24815 - p = (unsigned char *)ins_addr;
24816 + p = (unsigned char *)ktla_ktva(ins_addr);
24817 p += skip_prefix(p, &prf);
24818 p += get_opcode(p, &opcode);
24819 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
24820 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
24821 index 8573b83..c3b1a30 100644
24822 --- a/arch/x86/mm/pgtable.c
24823 +++ b/arch/x86/mm/pgtable.c
24824 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
24825 list_del(&page->lru);
24826 }
24827
24828 -#define UNSHARED_PTRS_PER_PGD \
24829 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24830 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24831 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
24832
24833 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24834 +{
24835 + while (count--)
24836 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
24837 +}
24838 +#endif
24839
24840 +#ifdef CONFIG_PAX_PER_CPU_PGD
24841 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24842 +{
24843 + while (count--)
24844 +
24845 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24846 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
24847 +#else
24848 + *dst++ = *src++;
24849 +#endif
24850 +
24851 +}
24852 +#endif
24853 +
24854 +#ifdef CONFIG_X86_64
24855 +#define pxd_t pud_t
24856 +#define pyd_t pgd_t
24857 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
24858 +#define pxd_free(mm, pud) pud_free((mm), (pud))
24859 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
24860 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
24861 +#define PYD_SIZE PGDIR_SIZE
24862 +#else
24863 +#define pxd_t pmd_t
24864 +#define pyd_t pud_t
24865 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
24866 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
24867 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
24868 +#define pyd_offset(mm, address) pud_offset((mm), (address))
24869 +#define PYD_SIZE PUD_SIZE
24870 +#endif
24871 +
24872 +#ifdef CONFIG_PAX_PER_CPU_PGD
24873 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
24874 +static inline void pgd_dtor(pgd_t *pgd) {}
24875 +#else
24876 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
24877 {
24878 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
24879 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
24880 pgd_list_del(pgd);
24881 spin_unlock(&pgd_lock);
24882 }
24883 +#endif
24884
24885 /*
24886 * List of all pgd's needed for non-PAE so it can invalidate entries
24887 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
24888 * -- wli
24889 */
24890
24891 -#ifdef CONFIG_X86_PAE
24892 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24893 /*
24894 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
24895 * updating the top-level pagetable entries to guarantee the
24896 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
24897 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
24898 * and initialize the kernel pmds here.
24899 */
24900 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
24901 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24902
24903 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24904 {
24905 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24906 */
24907 flush_tlb_mm(mm);
24908 }
24909 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
24910 +#define PREALLOCATED_PXDS USER_PGD_PTRS
24911 #else /* !CONFIG_X86_PAE */
24912
24913 /* No need to prepopulate any pagetable entries in non-PAE modes. */
24914 -#define PREALLOCATED_PMDS 0
24915 +#define PREALLOCATED_PXDS 0
24916
24917 #endif /* CONFIG_X86_PAE */
24918
24919 -static void free_pmds(pmd_t *pmds[])
24920 +static void free_pxds(pxd_t *pxds[])
24921 {
24922 int i;
24923
24924 - for(i = 0; i < PREALLOCATED_PMDS; i++)
24925 - if (pmds[i])
24926 - free_page((unsigned long)pmds[i]);
24927 + for(i = 0; i < PREALLOCATED_PXDS; i++)
24928 + if (pxds[i])
24929 + free_page((unsigned long)pxds[i]);
24930 }
24931
24932 -static int preallocate_pmds(pmd_t *pmds[])
24933 +static int preallocate_pxds(pxd_t *pxds[])
24934 {
24935 int i;
24936 bool failed = false;
24937
24938 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
24939 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
24940 - if (pmd == NULL)
24941 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
24942 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
24943 + if (pxd == NULL)
24944 failed = true;
24945 - pmds[i] = pmd;
24946 + pxds[i] = pxd;
24947 }
24948
24949 if (failed) {
24950 - free_pmds(pmds);
24951 + free_pxds(pxds);
24952 return -ENOMEM;
24953 }
24954
24955 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
24956 * preallocate which never got a corresponding vma will need to be
24957 * freed manually.
24958 */
24959 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
24960 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
24961 {
24962 int i;
24963
24964 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
24965 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
24966 pgd_t pgd = pgdp[i];
24967
24968 if (pgd_val(pgd) != 0) {
24969 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
24970 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
24971
24972 - pgdp[i] = native_make_pgd(0);
24973 + set_pgd(pgdp + i, native_make_pgd(0));
24974
24975 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
24976 - pmd_free(mm, pmd);
24977 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
24978 + pxd_free(mm, pxd);
24979 }
24980 }
24981 }
24982
24983 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
24984 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
24985 {
24986 - pud_t *pud;
24987 + pyd_t *pyd;
24988 unsigned long addr;
24989 int i;
24990
24991 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
24992 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
24993 return;
24994
24995 - pud = pud_offset(pgd, 0);
24996 +#ifdef CONFIG_X86_64
24997 + pyd = pyd_offset(mm, 0L);
24998 +#else
24999 + pyd = pyd_offset(pgd, 0L);
25000 +#endif
25001
25002 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25003 - i++, pud++, addr += PUD_SIZE) {
25004 - pmd_t *pmd = pmds[i];
25005 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25006 + i++, pyd++, addr += PYD_SIZE) {
25007 + pxd_t *pxd = pxds[i];
25008
25009 if (i >= KERNEL_PGD_BOUNDARY)
25010 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25011 - sizeof(pmd_t) * PTRS_PER_PMD);
25012 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25013 + sizeof(pxd_t) * PTRS_PER_PMD);
25014
25015 - pud_populate(mm, pud, pmd);
25016 + pyd_populate(mm, pyd, pxd);
25017 }
25018 }
25019
25020 pgd_t *pgd_alloc(struct mm_struct *mm)
25021 {
25022 pgd_t *pgd;
25023 - pmd_t *pmds[PREALLOCATED_PMDS];
25024 + pxd_t *pxds[PREALLOCATED_PXDS];
25025
25026 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25027
25028 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25029
25030 mm->pgd = pgd;
25031
25032 - if (preallocate_pmds(pmds) != 0)
25033 + if (preallocate_pxds(pxds) != 0)
25034 goto out_free_pgd;
25035
25036 if (paravirt_pgd_alloc(mm) != 0)
25037 - goto out_free_pmds;
25038 + goto out_free_pxds;
25039
25040 /*
25041 * Make sure that pre-populating the pmds is atomic with
25042 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25043 spin_lock(&pgd_lock);
25044
25045 pgd_ctor(mm, pgd);
25046 - pgd_prepopulate_pmd(mm, pgd, pmds);
25047 + pgd_prepopulate_pxd(mm, pgd, pxds);
25048
25049 spin_unlock(&pgd_lock);
25050
25051 return pgd;
25052
25053 -out_free_pmds:
25054 - free_pmds(pmds);
25055 +out_free_pxds:
25056 + free_pxds(pxds);
25057 out_free_pgd:
25058 free_page((unsigned long)pgd);
25059 out:
25060 @@ -295,7 +344,7 @@ out:
25061
25062 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25063 {
25064 - pgd_mop_up_pmds(mm, pgd);
25065 + pgd_mop_up_pxds(mm, pgd);
25066 pgd_dtor(pgd);
25067 paravirt_pgd_free(mm, pgd);
25068 free_page((unsigned long)pgd);
25069 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25070 index cac7184..09a39fa 100644
25071 --- a/arch/x86/mm/pgtable_32.c
25072 +++ b/arch/x86/mm/pgtable_32.c
25073 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25074 return;
25075 }
25076 pte = pte_offset_kernel(pmd, vaddr);
25077 +
25078 + pax_open_kernel();
25079 if (pte_val(pteval))
25080 set_pte_at(&init_mm, vaddr, pte, pteval);
25081 else
25082 pte_clear(&init_mm, vaddr, pte);
25083 + pax_close_kernel();
25084
25085 /*
25086 * It's enough to flush this one mapping.
25087 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25088 index 410531d..0f16030 100644
25089 --- a/arch/x86/mm/setup_nx.c
25090 +++ b/arch/x86/mm/setup_nx.c
25091 @@ -5,8 +5,10 @@
25092 #include <asm/pgtable.h>
25093 #include <asm/proto.h>
25094
25095 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25096 static int disable_nx __cpuinitdata;
25097
25098 +#ifndef CONFIG_PAX_PAGEEXEC
25099 /*
25100 * noexec = on|off
25101 *
25102 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25103 return 0;
25104 }
25105 early_param("noexec", noexec_setup);
25106 +#endif
25107 +
25108 +#endif
25109
25110 void __cpuinit x86_configure_nx(void)
25111 {
25112 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25113 if (cpu_has_nx && !disable_nx)
25114 __supported_pte_mask |= _PAGE_NX;
25115 else
25116 +#endif
25117 __supported_pte_mask &= ~_PAGE_NX;
25118 }
25119
25120 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25121 index d6c0418..06a0ad5 100644
25122 --- a/arch/x86/mm/tlb.c
25123 +++ b/arch/x86/mm/tlb.c
25124 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25125 BUG();
25126 cpumask_clear_cpu(cpu,
25127 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25128 +
25129 +#ifndef CONFIG_PAX_PER_CPU_PGD
25130 load_cr3(swapper_pg_dir);
25131 +#endif
25132 +
25133 }
25134 EXPORT_SYMBOL_GPL(leave_mm);
25135
25136 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25137 index 6687022..ceabcfa 100644
25138 --- a/arch/x86/net/bpf_jit.S
25139 +++ b/arch/x86/net/bpf_jit.S
25140 @@ -9,6 +9,7 @@
25141 */
25142 #include <linux/linkage.h>
25143 #include <asm/dwarf2.h>
25144 +#include <asm/alternative-asm.h>
25145
25146 /*
25147 * Calling convention :
25148 @@ -35,6 +36,7 @@ sk_load_word:
25149 jle bpf_slow_path_word
25150 mov (SKBDATA,%rsi),%eax
25151 bswap %eax /* ntohl() */
25152 + pax_force_retaddr
25153 ret
25154
25155
25156 @@ -53,6 +55,7 @@ sk_load_half:
25157 jle bpf_slow_path_half
25158 movzwl (SKBDATA,%rsi),%eax
25159 rol $8,%ax # ntohs()
25160 + pax_force_retaddr
25161 ret
25162
25163 sk_load_byte_ind:
25164 @@ -66,6 +69,7 @@ sk_load_byte:
25165 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25166 jle bpf_slow_path_byte
25167 movzbl (SKBDATA,%rsi),%eax
25168 + pax_force_retaddr
25169 ret
25170
25171 /**
25172 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
25173 movzbl (SKBDATA,%rsi),%ebx
25174 and $15,%bl
25175 shl $2,%bl
25176 + pax_force_retaddr
25177 ret
25178 CFI_ENDPROC
25179 ENDPROC(sk_load_byte_msh)
25180 @@ -91,6 +96,7 @@ bpf_error:
25181 xor %eax,%eax
25182 mov -8(%rbp),%rbx
25183 leaveq
25184 + pax_force_retaddr
25185 ret
25186
25187 /* rsi contains offset and can be scratched */
25188 @@ -113,6 +119,7 @@ bpf_slow_path_word:
25189 js bpf_error
25190 mov -12(%rbp),%eax
25191 bswap %eax
25192 + pax_force_retaddr
25193 ret
25194
25195 bpf_slow_path_half:
25196 @@ -121,12 +128,14 @@ bpf_slow_path_half:
25197 mov -12(%rbp),%ax
25198 rol $8,%ax
25199 movzwl %ax,%eax
25200 + pax_force_retaddr
25201 ret
25202
25203 bpf_slow_path_byte:
25204 bpf_slow_path_common(1)
25205 js bpf_error
25206 movzbl -12(%rbp),%eax
25207 + pax_force_retaddr
25208 ret
25209
25210 bpf_slow_path_byte_msh:
25211 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
25212 and $15,%al
25213 shl $2,%al
25214 xchg %eax,%ebx
25215 + pax_force_retaddr
25216 ret
25217 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25218 index 7c1b765..8c072c6 100644
25219 --- a/arch/x86/net/bpf_jit_comp.c
25220 +++ b/arch/x86/net/bpf_jit_comp.c
25221 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
25222 set_fs(old_fs);
25223 }
25224
25225 +struct bpf_jit_work {
25226 + struct work_struct work;
25227 + void *image;
25228 +};
25229
25230 void bpf_jit_compile(struct sk_filter *fp)
25231 {
25232 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25233 if (addrs == NULL)
25234 return;
25235
25236 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25237 + if (!fp->work)
25238 + goto out;
25239 +
25240 /* Before first pass, make a rough estimation of addrs[]
25241 * each bpf instruction is translated to less than 64 bytes
25242 */
25243 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
25244 func = sk_load_word;
25245 common_load: seen |= SEEN_DATAREF;
25246 if ((int)K < 0)
25247 - goto out;
25248 + goto error;
25249 t_offset = func - (image + addrs[i]);
25250 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
25251 EMIT1_off32(0xe8, t_offset); /* call */
25252 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25253 break;
25254 default:
25255 /* hmm, too complex filter, give up with jit compiler */
25256 - goto out;
25257 + goto error;
25258 }
25259 ilen = prog - temp;
25260 if (image) {
25261 if (unlikely(proglen + ilen > oldproglen)) {
25262 pr_err("bpb_jit_compile fatal error\n");
25263 - kfree(addrs);
25264 - module_free(NULL, image);
25265 - return;
25266 + module_free_exec(NULL, image);
25267 + goto error;
25268 }
25269 + pax_open_kernel();
25270 memcpy(image + proglen, temp, ilen);
25271 + pax_close_kernel();
25272 }
25273 proglen += ilen;
25274 addrs[i] = proglen;
25275 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25276 break;
25277 }
25278 if (proglen == oldproglen) {
25279 - image = module_alloc(max_t(unsigned int,
25280 - proglen,
25281 - sizeof(struct work_struct)));
25282 + image = module_alloc_exec(proglen);
25283 if (!image)
25284 - goto out;
25285 + goto error;
25286 }
25287 oldproglen = proglen;
25288 }
25289 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25290 bpf_flush_icache(image, image + proglen);
25291
25292 fp->bpf_func = (void *)image;
25293 - }
25294 + } else
25295 +error:
25296 + kfree(fp->work);
25297 +
25298 out:
25299 kfree(addrs);
25300 return;
25301 @@ -645,18 +655,20 @@ out:
25302
25303 static void jit_free_defer(struct work_struct *arg)
25304 {
25305 - module_free(NULL, arg);
25306 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25307 + kfree(arg);
25308 }
25309
25310 /* run from softirq, we must use a work_struct to call
25311 - * module_free() from process context
25312 + * module_free_exec() from process context
25313 */
25314 void bpf_jit_free(struct sk_filter *fp)
25315 {
25316 if (fp->bpf_func != sk_run_filter) {
25317 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
25318 + struct work_struct *work = &fp->work->work;
25319
25320 INIT_WORK(work, jit_free_defer);
25321 + fp->work->image = fp->bpf_func;
25322 schedule_work(work);
25323 }
25324 }
25325 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25326 index bff89df..377758a 100644
25327 --- a/arch/x86/oprofile/backtrace.c
25328 +++ b/arch/x86/oprofile/backtrace.c
25329 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
25330 struct stack_frame_ia32 *fp;
25331 unsigned long bytes;
25332
25333 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25334 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25335 if (bytes != sizeof(bufhead))
25336 return NULL;
25337
25338 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
25339 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
25340
25341 oprofile_add_trace(bufhead[0].return_address);
25342
25343 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
25344 struct stack_frame bufhead[2];
25345 unsigned long bytes;
25346
25347 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25348 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25349 if (bytes != sizeof(bufhead))
25350 return NULL;
25351
25352 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25353 {
25354 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
25355
25356 - if (!user_mode_vm(regs)) {
25357 + if (!user_mode(regs)) {
25358 unsigned long stack = kernel_stack_pointer(regs);
25359 if (depth)
25360 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25361 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
25362 index cb29191..036766d 100644
25363 --- a/arch/x86/pci/mrst.c
25364 +++ b/arch/x86/pci/mrst.c
25365 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
25366 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
25367 pci_mmcfg_late_init();
25368 pcibios_enable_irq = mrst_pci_irq_enable;
25369 - pci_root_ops = pci_mrst_ops;
25370 + pax_open_kernel();
25371 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
25372 + pax_close_kernel();
25373 /* Continue with standard init */
25374 return 1;
25375 }
25376 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
25377 index db0e9a5..0372c14 100644
25378 --- a/arch/x86/pci/pcbios.c
25379 +++ b/arch/x86/pci/pcbios.c
25380 @@ -79,50 +79,93 @@ union bios32 {
25381 static struct {
25382 unsigned long address;
25383 unsigned short segment;
25384 -} bios32_indirect = { 0, __KERNEL_CS };
25385 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
25386
25387 /*
25388 * Returns the entry point for the given service, NULL on error
25389 */
25390
25391 -static unsigned long bios32_service(unsigned long service)
25392 +static unsigned long __devinit bios32_service(unsigned long service)
25393 {
25394 unsigned char return_code; /* %al */
25395 unsigned long address; /* %ebx */
25396 unsigned long length; /* %ecx */
25397 unsigned long entry; /* %edx */
25398 unsigned long flags;
25399 + struct desc_struct d, *gdt;
25400
25401 local_irq_save(flags);
25402 - __asm__("lcall *(%%edi); cld"
25403 +
25404 + gdt = get_cpu_gdt_table(smp_processor_id());
25405 +
25406 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
25407 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25408 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
25409 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25410 +
25411 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
25412 : "=a" (return_code),
25413 "=b" (address),
25414 "=c" (length),
25415 "=d" (entry)
25416 : "0" (service),
25417 "1" (0),
25418 - "D" (&bios32_indirect));
25419 + "D" (&bios32_indirect),
25420 + "r"(__PCIBIOS_DS)
25421 + : "memory");
25422 +
25423 + pax_open_kernel();
25424 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
25425 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
25426 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
25427 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
25428 + pax_close_kernel();
25429 +
25430 local_irq_restore(flags);
25431
25432 switch (return_code) {
25433 - case 0:
25434 - return address + entry;
25435 - case 0x80: /* Not present */
25436 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25437 - return 0;
25438 - default: /* Shouldn't happen */
25439 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25440 - service, return_code);
25441 + case 0: {
25442 + int cpu;
25443 + unsigned char flags;
25444 +
25445 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
25446 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
25447 + printk(KERN_WARNING "bios32_service: not valid\n");
25448 return 0;
25449 + }
25450 + address = address + PAGE_OFFSET;
25451 + length += 16UL; /* some BIOSs underreport this... */
25452 + flags = 4;
25453 + if (length >= 64*1024*1024) {
25454 + length >>= PAGE_SHIFT;
25455 + flags |= 8;
25456 + }
25457 +
25458 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25459 + gdt = get_cpu_gdt_table(cpu);
25460 + pack_descriptor(&d, address, length, 0x9b, flags);
25461 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25462 + pack_descriptor(&d, address, length, 0x93, flags);
25463 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25464 + }
25465 + return entry;
25466 + }
25467 + case 0x80: /* Not present */
25468 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25469 + return 0;
25470 + default: /* Shouldn't happen */
25471 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25472 + service, return_code);
25473 + return 0;
25474 }
25475 }
25476
25477 static struct {
25478 unsigned long address;
25479 unsigned short segment;
25480 -} pci_indirect = { 0, __KERNEL_CS };
25481 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
25482
25483 -static int pci_bios_present;
25484 +static int pci_bios_present __read_only;
25485
25486 static int __devinit check_pcibios(void)
25487 {
25488 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
25489 unsigned long flags, pcibios_entry;
25490
25491 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
25492 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
25493 + pci_indirect.address = pcibios_entry;
25494
25495 local_irq_save(flags);
25496 - __asm__(
25497 - "lcall *(%%edi); cld\n\t"
25498 + __asm__("movw %w6, %%ds\n\t"
25499 + "lcall *%%ss:(%%edi); cld\n\t"
25500 + "push %%ss\n\t"
25501 + "pop %%ds\n\t"
25502 "jc 1f\n\t"
25503 "xor %%ah, %%ah\n"
25504 "1:"
25505 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
25506 "=b" (ebx),
25507 "=c" (ecx)
25508 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
25509 - "D" (&pci_indirect)
25510 + "D" (&pci_indirect),
25511 + "r" (__PCIBIOS_DS)
25512 : "memory");
25513 local_irq_restore(flags);
25514
25515 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25516
25517 switch (len) {
25518 case 1:
25519 - __asm__("lcall *(%%esi); cld\n\t"
25520 + __asm__("movw %w6, %%ds\n\t"
25521 + "lcall *%%ss:(%%esi); cld\n\t"
25522 + "push %%ss\n\t"
25523 + "pop %%ds\n\t"
25524 "jc 1f\n\t"
25525 "xor %%ah, %%ah\n"
25526 "1:"
25527 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25528 : "1" (PCIBIOS_READ_CONFIG_BYTE),
25529 "b" (bx),
25530 "D" ((long)reg),
25531 - "S" (&pci_indirect));
25532 + "S" (&pci_indirect),
25533 + "r" (__PCIBIOS_DS));
25534 /*
25535 * Zero-extend the result beyond 8 bits, do not trust the
25536 * BIOS having done it:
25537 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25538 *value &= 0xff;
25539 break;
25540 case 2:
25541 - __asm__("lcall *(%%esi); cld\n\t"
25542 + __asm__("movw %w6, %%ds\n\t"
25543 + "lcall *%%ss:(%%esi); cld\n\t"
25544 + "push %%ss\n\t"
25545 + "pop %%ds\n\t"
25546 "jc 1f\n\t"
25547 "xor %%ah, %%ah\n"
25548 "1:"
25549 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25550 : "1" (PCIBIOS_READ_CONFIG_WORD),
25551 "b" (bx),
25552 "D" ((long)reg),
25553 - "S" (&pci_indirect));
25554 + "S" (&pci_indirect),
25555 + "r" (__PCIBIOS_DS));
25556 /*
25557 * Zero-extend the result beyond 16 bits, do not trust the
25558 * BIOS having done it:
25559 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25560 *value &= 0xffff;
25561 break;
25562 case 4:
25563 - __asm__("lcall *(%%esi); cld\n\t"
25564 + __asm__("movw %w6, %%ds\n\t"
25565 + "lcall *%%ss:(%%esi); cld\n\t"
25566 + "push %%ss\n\t"
25567 + "pop %%ds\n\t"
25568 "jc 1f\n\t"
25569 "xor %%ah, %%ah\n"
25570 "1:"
25571 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25572 : "1" (PCIBIOS_READ_CONFIG_DWORD),
25573 "b" (bx),
25574 "D" ((long)reg),
25575 - "S" (&pci_indirect));
25576 + "S" (&pci_indirect),
25577 + "r" (__PCIBIOS_DS));
25578 break;
25579 }
25580
25581 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25582
25583 switch (len) {
25584 case 1:
25585 - __asm__("lcall *(%%esi); cld\n\t"
25586 + __asm__("movw %w6, %%ds\n\t"
25587 + "lcall *%%ss:(%%esi); cld\n\t"
25588 + "push %%ss\n\t"
25589 + "pop %%ds\n\t"
25590 "jc 1f\n\t"
25591 "xor %%ah, %%ah\n"
25592 "1:"
25593 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25594 "c" (value),
25595 "b" (bx),
25596 "D" ((long)reg),
25597 - "S" (&pci_indirect));
25598 + "S" (&pci_indirect),
25599 + "r" (__PCIBIOS_DS));
25600 break;
25601 case 2:
25602 - __asm__("lcall *(%%esi); cld\n\t"
25603 + __asm__("movw %w6, %%ds\n\t"
25604 + "lcall *%%ss:(%%esi); cld\n\t"
25605 + "push %%ss\n\t"
25606 + "pop %%ds\n\t"
25607 "jc 1f\n\t"
25608 "xor %%ah, %%ah\n"
25609 "1:"
25610 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25611 "c" (value),
25612 "b" (bx),
25613 "D" ((long)reg),
25614 - "S" (&pci_indirect));
25615 + "S" (&pci_indirect),
25616 + "r" (__PCIBIOS_DS));
25617 break;
25618 case 4:
25619 - __asm__("lcall *(%%esi); cld\n\t"
25620 + __asm__("movw %w6, %%ds\n\t"
25621 + "lcall *%%ss:(%%esi); cld\n\t"
25622 + "push %%ss\n\t"
25623 + "pop %%ds\n\t"
25624 "jc 1f\n\t"
25625 "xor %%ah, %%ah\n"
25626 "1:"
25627 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25628 "c" (value),
25629 "b" (bx),
25630 "D" ((long)reg),
25631 - "S" (&pci_indirect));
25632 + "S" (&pci_indirect),
25633 + "r" (__PCIBIOS_DS));
25634 break;
25635 }
25636
25637 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25638
25639 DBG("PCI: Fetching IRQ routing table... ");
25640 __asm__("push %%es\n\t"
25641 + "movw %w8, %%ds\n\t"
25642 "push %%ds\n\t"
25643 "pop %%es\n\t"
25644 - "lcall *(%%esi); cld\n\t"
25645 + "lcall *%%ss:(%%esi); cld\n\t"
25646 "pop %%es\n\t"
25647 + "push %%ss\n\t"
25648 + "pop %%ds\n"
25649 "jc 1f\n\t"
25650 "xor %%ah, %%ah\n"
25651 "1:"
25652 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25653 "1" (0),
25654 "D" ((long) &opt),
25655 "S" (&pci_indirect),
25656 - "m" (opt)
25657 + "m" (opt),
25658 + "r" (__PCIBIOS_DS)
25659 : "memory");
25660 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
25661 if (ret & 0xff00)
25662 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25663 {
25664 int ret;
25665
25666 - __asm__("lcall *(%%esi); cld\n\t"
25667 + __asm__("movw %w5, %%ds\n\t"
25668 + "lcall *%%ss:(%%esi); cld\n\t"
25669 + "push %%ss\n\t"
25670 + "pop %%ds\n"
25671 "jc 1f\n\t"
25672 "xor %%ah, %%ah\n"
25673 "1:"
25674 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25675 : "0" (PCIBIOS_SET_PCI_HW_INT),
25676 "b" ((dev->bus->number << 8) | dev->devfn),
25677 "c" ((irq << 8) | (pin + 10)),
25678 - "S" (&pci_indirect));
25679 + "S" (&pci_indirect),
25680 + "r" (__PCIBIOS_DS));
25681 return !(ret & 0xff00);
25682 }
25683 EXPORT_SYMBOL(pcibios_set_irq_routing);
25684 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
25685 index 40e4469..1ab536e 100644
25686 --- a/arch/x86/platform/efi/efi_32.c
25687 +++ b/arch/x86/platform/efi/efi_32.c
25688 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
25689 {
25690 struct desc_ptr gdt_descr;
25691
25692 +#ifdef CONFIG_PAX_KERNEXEC
25693 + struct desc_struct d;
25694 +#endif
25695 +
25696 local_irq_save(efi_rt_eflags);
25697
25698 load_cr3(initial_page_table);
25699 __flush_tlb_all();
25700
25701 +#ifdef CONFIG_PAX_KERNEXEC
25702 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
25703 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25704 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
25705 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25706 +#endif
25707 +
25708 gdt_descr.address = __pa(get_cpu_gdt_table(0));
25709 gdt_descr.size = GDT_SIZE - 1;
25710 load_gdt(&gdt_descr);
25711 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
25712 {
25713 struct desc_ptr gdt_descr;
25714
25715 +#ifdef CONFIG_PAX_KERNEXEC
25716 + struct desc_struct d;
25717 +
25718 + memset(&d, 0, sizeof d);
25719 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25720 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25721 +#endif
25722 +
25723 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
25724 gdt_descr.size = GDT_SIZE - 1;
25725 load_gdt(&gdt_descr);
25726 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
25727 index fbe66e6..c5c0dd2 100644
25728 --- a/arch/x86/platform/efi/efi_stub_32.S
25729 +++ b/arch/x86/platform/efi/efi_stub_32.S
25730 @@ -6,7 +6,9 @@
25731 */
25732
25733 #include <linux/linkage.h>
25734 +#include <linux/init.h>
25735 #include <asm/page_types.h>
25736 +#include <asm/segment.h>
25737
25738 /*
25739 * efi_call_phys(void *, ...) is a function with variable parameters.
25740 @@ -20,7 +22,7 @@
25741 * service functions will comply with gcc calling convention, too.
25742 */
25743
25744 -.text
25745 +__INIT
25746 ENTRY(efi_call_phys)
25747 /*
25748 * 0. The function can only be called in Linux kernel. So CS has been
25749 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
25750 * The mapping of lower virtual memory has been created in prelog and
25751 * epilog.
25752 */
25753 - movl $1f, %edx
25754 - subl $__PAGE_OFFSET, %edx
25755 - jmp *%edx
25756 + movl $(__KERNEXEC_EFI_DS), %edx
25757 + mov %edx, %ds
25758 + mov %edx, %es
25759 + mov %edx, %ss
25760 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
25761 1:
25762
25763 /*
25764 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
25765 * parameter 2, ..., param n. To make things easy, we save the return
25766 * address of efi_call_phys in a global variable.
25767 */
25768 - popl %edx
25769 - movl %edx, saved_return_addr
25770 - /* get the function pointer into ECX*/
25771 - popl %ecx
25772 - movl %ecx, efi_rt_function_ptr
25773 - movl $2f, %edx
25774 - subl $__PAGE_OFFSET, %edx
25775 - pushl %edx
25776 + popl (saved_return_addr)
25777 + popl (efi_rt_function_ptr)
25778
25779 /*
25780 * 3. Clear PG bit in %CR0.
25781 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
25782 /*
25783 * 5. Call the physical function.
25784 */
25785 - jmp *%ecx
25786 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
25787
25788 -2:
25789 /*
25790 * 6. After EFI runtime service returns, control will return to
25791 * following instruction. We'd better readjust stack pointer first.
25792 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
25793 movl %cr0, %edx
25794 orl $0x80000000, %edx
25795 movl %edx, %cr0
25796 - jmp 1f
25797 -1:
25798 +
25799 /*
25800 * 8. Now restore the virtual mode from flat mode by
25801 * adding EIP with PAGE_OFFSET.
25802 */
25803 - movl $1f, %edx
25804 - jmp *%edx
25805 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
25806 1:
25807 + movl $(__KERNEL_DS), %edx
25808 + mov %edx, %ds
25809 + mov %edx, %es
25810 + mov %edx, %ss
25811
25812 /*
25813 * 9. Balance the stack. And because EAX contain the return value,
25814 * we'd better not clobber it.
25815 */
25816 - leal efi_rt_function_ptr, %edx
25817 - movl (%edx), %ecx
25818 - pushl %ecx
25819 + pushl (efi_rt_function_ptr)
25820
25821 /*
25822 - * 10. Push the saved return address onto the stack and return.
25823 + * 10. Return to the saved return address.
25824 */
25825 - leal saved_return_addr, %edx
25826 - movl (%edx), %ecx
25827 - pushl %ecx
25828 - ret
25829 + jmpl *(saved_return_addr)
25830 ENDPROC(efi_call_phys)
25831 .previous
25832
25833 -.data
25834 +__INITDATA
25835 saved_return_addr:
25836 .long 0
25837 efi_rt_function_ptr:
25838 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
25839 index 4c07cca..2c8427d 100644
25840 --- a/arch/x86/platform/efi/efi_stub_64.S
25841 +++ b/arch/x86/platform/efi/efi_stub_64.S
25842 @@ -7,6 +7,7 @@
25843 */
25844
25845 #include <linux/linkage.h>
25846 +#include <asm/alternative-asm.h>
25847
25848 #define SAVE_XMM \
25849 mov %rsp, %rax; \
25850 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
25851 call *%rdi
25852 addq $32, %rsp
25853 RESTORE_XMM
25854 + pax_force_retaddr 0, 1
25855 ret
25856 ENDPROC(efi_call0)
25857
25858 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
25859 call *%rdi
25860 addq $32, %rsp
25861 RESTORE_XMM
25862 + pax_force_retaddr 0, 1
25863 ret
25864 ENDPROC(efi_call1)
25865
25866 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
25867 call *%rdi
25868 addq $32, %rsp
25869 RESTORE_XMM
25870 + pax_force_retaddr 0, 1
25871 ret
25872 ENDPROC(efi_call2)
25873
25874 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
25875 call *%rdi
25876 addq $32, %rsp
25877 RESTORE_XMM
25878 + pax_force_retaddr 0, 1
25879 ret
25880 ENDPROC(efi_call3)
25881
25882 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
25883 call *%rdi
25884 addq $32, %rsp
25885 RESTORE_XMM
25886 + pax_force_retaddr 0, 1
25887 ret
25888 ENDPROC(efi_call4)
25889
25890 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
25891 call *%rdi
25892 addq $48, %rsp
25893 RESTORE_XMM
25894 + pax_force_retaddr 0, 1
25895 ret
25896 ENDPROC(efi_call5)
25897
25898 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
25899 call *%rdi
25900 addq $48, %rsp
25901 RESTORE_XMM
25902 + pax_force_retaddr 0, 1
25903 ret
25904 ENDPROC(efi_call6)
25905 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
25906 index ad4ec1c..686479e 100644
25907 --- a/arch/x86/platform/mrst/mrst.c
25908 +++ b/arch/x86/platform/mrst/mrst.c
25909 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
25910 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
25911 int sfi_mrtc_num;
25912
25913 -static void mrst_power_off(void)
25914 +static __noreturn void mrst_power_off(void)
25915 {
25916 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25917 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
25918 + BUG();
25919 }
25920
25921 -static void mrst_reboot(void)
25922 +static __noreturn void mrst_reboot(void)
25923 {
25924 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25925 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
25926 else
25927 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
25928 + BUG();
25929 }
25930
25931 /* parse all the mtimer info to a static mtimer array */
25932 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
25933 index f10c0af..3ec1f95 100644
25934 --- a/arch/x86/power/cpu.c
25935 +++ b/arch/x86/power/cpu.c
25936 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
25937 static void fix_processor_context(void)
25938 {
25939 int cpu = smp_processor_id();
25940 - struct tss_struct *t = &per_cpu(init_tss, cpu);
25941 + struct tss_struct *t = init_tss + cpu;
25942
25943 set_tss_desc(cpu, t); /*
25944 * This just modifies memory; should not be
25945 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
25946 */
25947
25948 #ifdef CONFIG_X86_64
25949 + pax_open_kernel();
25950 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
25951 + pax_close_kernel();
25952
25953 syscall_init(); /* This sets MSR_*STAR and related */
25954 #endif
25955 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
25956 index 5d17950..2253fc9 100644
25957 --- a/arch/x86/vdso/Makefile
25958 +++ b/arch/x86/vdso/Makefile
25959 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
25960 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
25961 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
25962
25963 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25964 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25965 GCOV_PROFILE := n
25966
25967 #
25968 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
25969 index 468d591..8e80a0a 100644
25970 --- a/arch/x86/vdso/vdso32-setup.c
25971 +++ b/arch/x86/vdso/vdso32-setup.c
25972 @@ -25,6 +25,7 @@
25973 #include <asm/tlbflush.h>
25974 #include <asm/vdso.h>
25975 #include <asm/proto.h>
25976 +#include <asm/mman.h>
25977
25978 enum {
25979 VDSO_DISABLED = 0,
25980 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
25981 void enable_sep_cpu(void)
25982 {
25983 int cpu = get_cpu();
25984 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
25985 + struct tss_struct *tss = init_tss + cpu;
25986
25987 if (!boot_cpu_has(X86_FEATURE_SEP)) {
25988 put_cpu();
25989 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
25990 gate_vma.vm_start = FIXADDR_USER_START;
25991 gate_vma.vm_end = FIXADDR_USER_END;
25992 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
25993 - gate_vma.vm_page_prot = __P101;
25994 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
25995 /*
25996 * Make sure the vDSO gets into every core dump.
25997 * Dumping its contents makes post-mortem fully interpretable later
25998 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
25999 if (compat)
26000 addr = VDSO_HIGH_BASE;
26001 else {
26002 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26003 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26004 if (IS_ERR_VALUE(addr)) {
26005 ret = addr;
26006 goto up_fail;
26007 }
26008 }
26009
26010 - current->mm->context.vdso = (void *)addr;
26011 + current->mm->context.vdso = addr;
26012
26013 if (compat_uses_vma || !compat) {
26014 /*
26015 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26016 }
26017
26018 current_thread_info()->sysenter_return =
26019 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26020 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26021
26022 up_fail:
26023 if (ret)
26024 - current->mm->context.vdso = NULL;
26025 + current->mm->context.vdso = 0;
26026
26027 up_write(&mm->mmap_sem);
26028
26029 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26030
26031 const char *arch_vma_name(struct vm_area_struct *vma)
26032 {
26033 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26034 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26035 return "[vdso]";
26036 +
26037 +#ifdef CONFIG_PAX_SEGMEXEC
26038 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26039 + return "[vdso]";
26040 +#endif
26041 +
26042 return NULL;
26043 }
26044
26045 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26046 * Check to see if the corresponding task was created in compat vdso
26047 * mode.
26048 */
26049 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26050 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26051 return &gate_vma;
26052 return NULL;
26053 }
26054 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26055 index 153407c..611cba9 100644
26056 --- a/arch/x86/vdso/vma.c
26057 +++ b/arch/x86/vdso/vma.c
26058 @@ -16,8 +16,6 @@
26059 #include <asm/vdso.h>
26060 #include <asm/page.h>
26061
26062 -unsigned int __read_mostly vdso_enabled = 1;
26063 -
26064 extern char vdso_start[], vdso_end[];
26065 extern unsigned short vdso_sync_cpuid;
26066
26067 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26068 * unaligned here as a result of stack start randomization.
26069 */
26070 addr = PAGE_ALIGN(addr);
26071 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26072
26073 return addr;
26074 }
26075 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26076 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26077 {
26078 struct mm_struct *mm = current->mm;
26079 - unsigned long addr;
26080 + unsigned long addr = 0;
26081 int ret;
26082
26083 - if (!vdso_enabled)
26084 - return 0;
26085 -
26086 down_write(&mm->mmap_sem);
26087 +
26088 +#ifdef CONFIG_PAX_RANDMMAP
26089 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26090 +#endif
26091 +
26092 addr = vdso_addr(mm->start_stack, vdso_size);
26093 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26094 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26095 if (IS_ERR_VALUE(addr)) {
26096 ret = addr;
26097 goto up_fail;
26098 }
26099
26100 - current->mm->context.vdso = (void *)addr;
26101 + mm->context.vdso = addr;
26102
26103 ret = install_special_mapping(mm, addr, vdso_size,
26104 VM_READ|VM_EXEC|
26105 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
26106 VM_ALWAYSDUMP,
26107 vdso_pages);
26108 - if (ret) {
26109 - current->mm->context.vdso = NULL;
26110 - goto up_fail;
26111 - }
26112 +
26113 + if (ret)
26114 + mm->context.vdso = 0;
26115
26116 up_fail:
26117 up_write(&mm->mmap_sem);
26118 return ret;
26119 }
26120 -
26121 -static __init int vdso_setup(char *s)
26122 -{
26123 - vdso_enabled = simple_strtoul(s, NULL, 0);
26124 - return 0;
26125 -}
26126 -__setup("vdso=", vdso_setup);
26127 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26128 index 1f92865..c843b20 100644
26129 --- a/arch/x86/xen/enlighten.c
26130 +++ b/arch/x86/xen/enlighten.c
26131 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26132
26133 struct shared_info xen_dummy_shared_info;
26134
26135 -void *xen_initial_gdt;
26136 -
26137 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26138 __read_mostly int xen_have_vector_callback;
26139 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26140 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26141 #endif
26142 };
26143
26144 -static void xen_reboot(int reason)
26145 +static __noreturn void xen_reboot(int reason)
26146 {
26147 struct sched_shutdown r = { .reason = reason };
26148
26149 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
26150 BUG();
26151 }
26152
26153 -static void xen_restart(char *msg)
26154 +static __noreturn void xen_restart(char *msg)
26155 {
26156 xen_reboot(SHUTDOWN_reboot);
26157 }
26158
26159 -static void xen_emergency_restart(void)
26160 +static __noreturn void xen_emergency_restart(void)
26161 {
26162 xen_reboot(SHUTDOWN_reboot);
26163 }
26164
26165 -static void xen_machine_halt(void)
26166 +static __noreturn void xen_machine_halt(void)
26167 {
26168 xen_reboot(SHUTDOWN_poweroff);
26169 }
26170 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
26171 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26172
26173 /* Work out if we support NX */
26174 - x86_configure_nx();
26175 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26176 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26177 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26178 + unsigned l, h;
26179 +
26180 + __supported_pte_mask |= _PAGE_NX;
26181 + rdmsr(MSR_EFER, l, h);
26182 + l |= EFER_NX;
26183 + wrmsr(MSR_EFER, l, h);
26184 + }
26185 +#endif
26186
26187 xen_setup_features();
26188
26189 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
26190
26191 machine_ops = xen_machine_ops;
26192
26193 - /*
26194 - * The only reliable way to retain the initial address of the
26195 - * percpu gdt_page is to remember it here, so we can go and
26196 - * mark it RW later, when the initial percpu area is freed.
26197 - */
26198 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26199 -
26200 xen_smp_init();
26201
26202 #ifdef CONFIG_ACPI_NUMA
26203 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26204 index 87f6673..e2555a6 100644
26205 --- a/arch/x86/xen/mmu.c
26206 +++ b/arch/x86/xen/mmu.c
26207 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26208 convert_pfn_mfn(init_level4_pgt);
26209 convert_pfn_mfn(level3_ident_pgt);
26210 convert_pfn_mfn(level3_kernel_pgt);
26211 + convert_pfn_mfn(level3_vmalloc_start_pgt);
26212 + convert_pfn_mfn(level3_vmalloc_end_pgt);
26213 + convert_pfn_mfn(level3_vmemmap_pgt);
26214
26215 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26216 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26217 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26218 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26219 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26220 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26221 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26222 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26223 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26224 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26225 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26226 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26227 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26228
26229 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
26230 pv_mmu_ops.set_pud = xen_set_pud;
26231 #if PAGETABLE_LEVELS == 4
26232 pv_mmu_ops.set_pgd = xen_set_pgd;
26233 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26234 #endif
26235
26236 /* This will work as long as patching hasn't happened yet
26237 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
26238 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26239 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26240 .set_pgd = xen_set_pgd_hyper,
26241 + .set_pgd_batched = xen_set_pgd_hyper,
26242
26243 .alloc_pud = xen_alloc_pmd_init,
26244 .release_pud = xen_release_pmd_init,
26245 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26246 index 041d4fe..7666b7e 100644
26247 --- a/arch/x86/xen/smp.c
26248 +++ b/arch/x86/xen/smp.c
26249 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26250 {
26251 BUG_ON(smp_processor_id() != 0);
26252 native_smp_prepare_boot_cpu();
26253 -
26254 - /* We've switched to the "real" per-cpu gdt, so make sure the
26255 - old memory can be recycled */
26256 - make_lowmem_page_readwrite(xen_initial_gdt);
26257 -
26258 xen_filter_cpu_maps();
26259 xen_setup_vcpu_info_placement();
26260 }
26261 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26262 gdt = get_cpu_gdt_table(cpu);
26263
26264 ctxt->flags = VGCF_IN_KERNEL;
26265 - ctxt->user_regs.ds = __USER_DS;
26266 - ctxt->user_regs.es = __USER_DS;
26267 + ctxt->user_regs.ds = __KERNEL_DS;
26268 + ctxt->user_regs.es = __KERNEL_DS;
26269 ctxt->user_regs.ss = __KERNEL_DS;
26270 #ifdef CONFIG_X86_32
26271 ctxt->user_regs.fs = __KERNEL_PERCPU;
26272 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26273 + savesegment(gs, ctxt->user_regs.gs);
26274 #else
26275 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26276 #endif
26277 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26278 int rc;
26279
26280 per_cpu(current_task, cpu) = idle;
26281 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
26282 #ifdef CONFIG_X86_32
26283 irq_ctx_init(cpu);
26284 #else
26285 clear_tsk_thread_flag(idle, TIF_FORK);
26286 - per_cpu(kernel_stack, cpu) =
26287 - (unsigned long)task_stack_page(idle) -
26288 - KERNEL_STACK_OFFSET + THREAD_SIZE;
26289 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26290 #endif
26291 xen_setup_runstate_info(cpu);
26292 xen_setup_timer(cpu);
26293 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26294 index b040b0e..8cc4fe0 100644
26295 --- a/arch/x86/xen/xen-asm_32.S
26296 +++ b/arch/x86/xen/xen-asm_32.S
26297 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
26298 ESP_OFFSET=4 # bytes pushed onto stack
26299
26300 /*
26301 - * Store vcpu_info pointer for easy access. Do it this way to
26302 - * avoid having to reload %fs
26303 + * Store vcpu_info pointer for easy access.
26304 */
26305 #ifdef CONFIG_SMP
26306 - GET_THREAD_INFO(%eax)
26307 - movl TI_cpu(%eax), %eax
26308 - movl __per_cpu_offset(,%eax,4), %eax
26309 - mov xen_vcpu(%eax), %eax
26310 + push %fs
26311 + mov $(__KERNEL_PERCPU), %eax
26312 + mov %eax, %fs
26313 + mov PER_CPU_VAR(xen_vcpu), %eax
26314 + pop %fs
26315 #else
26316 movl xen_vcpu, %eax
26317 #endif
26318 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26319 index aaa7291..3f77960 100644
26320 --- a/arch/x86/xen/xen-head.S
26321 +++ b/arch/x86/xen/xen-head.S
26322 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
26323 #ifdef CONFIG_X86_32
26324 mov %esi,xen_start_info
26325 mov $init_thread_union+THREAD_SIZE,%esp
26326 +#ifdef CONFIG_SMP
26327 + movl $cpu_gdt_table,%edi
26328 + movl $__per_cpu_load,%eax
26329 + movw %ax,__KERNEL_PERCPU + 2(%edi)
26330 + rorl $16,%eax
26331 + movb %al,__KERNEL_PERCPU + 4(%edi)
26332 + movb %ah,__KERNEL_PERCPU + 7(%edi)
26333 + movl $__per_cpu_end - 1,%eax
26334 + subl $__per_cpu_start,%eax
26335 + movw %ax,__KERNEL_PERCPU + 0(%edi)
26336 +#endif
26337 #else
26338 mov %rsi,xen_start_info
26339 mov $init_thread_union+THREAD_SIZE,%rsp
26340 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26341 index b095739..8c17bcd 100644
26342 --- a/arch/x86/xen/xen-ops.h
26343 +++ b/arch/x86/xen/xen-ops.h
26344 @@ -10,8 +10,6 @@
26345 extern const char xen_hypervisor_callback[];
26346 extern const char xen_failsafe_callback[];
26347
26348 -extern void *xen_initial_gdt;
26349 -
26350 struct trap_info;
26351 void xen_copy_trap_info(struct trap_info *traps);
26352
26353 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
26354 index 525bd3d..ef888b1 100644
26355 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
26356 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
26357 @@ -119,9 +119,9 @@
26358 ----------------------------------------------------------------------*/
26359
26360 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
26361 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
26362 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
26363 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
26364 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26365
26366 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
26367 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
26368 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
26369 index 2f33760..835e50a 100644
26370 --- a/arch/xtensa/variants/fsf/include/variant/core.h
26371 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
26372 @@ -11,6 +11,7 @@
26373 #ifndef _XTENSA_CORE_H
26374 #define _XTENSA_CORE_H
26375
26376 +#include <linux/const.h>
26377
26378 /****************************************************************************
26379 Parameters Useful for Any Code, USER or PRIVILEGED
26380 @@ -112,9 +113,9 @@
26381 ----------------------------------------------------------------------*/
26382
26383 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26384 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26385 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26386 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26387 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26388
26389 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
26390 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
26391 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
26392 index af00795..2bb8105 100644
26393 --- a/arch/xtensa/variants/s6000/include/variant/core.h
26394 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
26395 @@ -11,6 +11,7 @@
26396 #ifndef _XTENSA_CORE_CONFIGURATION_H
26397 #define _XTENSA_CORE_CONFIGURATION_H
26398
26399 +#include <linux/const.h>
26400
26401 /****************************************************************************
26402 Parameters Useful for Any Code, USER or PRIVILEGED
26403 @@ -118,9 +119,9 @@
26404 ----------------------------------------------------------------------*/
26405
26406 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26407 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26408 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26409 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26410 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26411
26412 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
26413 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
26414 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26415 index 58916af..9cb880b 100644
26416 --- a/block/blk-iopoll.c
26417 +++ b/block/blk-iopoll.c
26418 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26419 }
26420 EXPORT_SYMBOL(blk_iopoll_complete);
26421
26422 -static void blk_iopoll_softirq(struct softirq_action *h)
26423 +static void blk_iopoll_softirq(void)
26424 {
26425 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26426 int rearm = 0, budget = blk_iopoll_budget;
26427 diff --git a/block/blk-map.c b/block/blk-map.c
26428 index 623e1cd..ca1e109 100644
26429 --- a/block/blk-map.c
26430 +++ b/block/blk-map.c
26431 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26432 if (!len || !kbuf)
26433 return -EINVAL;
26434
26435 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
26436 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
26437 if (do_copy)
26438 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
26439 else
26440 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
26441 index 1366a89..e17f54b 100644
26442 --- a/block/blk-softirq.c
26443 +++ b/block/blk-softirq.c
26444 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
26445 * Softirq action handler - move entries to local list and loop over them
26446 * while passing them to the queue registered handler.
26447 */
26448 -static void blk_done_softirq(struct softirq_action *h)
26449 +static void blk_done_softirq(void)
26450 {
26451 struct list_head *cpu_list, local_list;
26452
26453 diff --git a/block/bsg.c b/block/bsg.c
26454 index c0ab25c..9d49f8f 100644
26455 --- a/block/bsg.c
26456 +++ b/block/bsg.c
26457 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
26458 struct sg_io_v4 *hdr, struct bsg_device *bd,
26459 fmode_t has_write_perm)
26460 {
26461 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26462 + unsigned char *cmdptr;
26463 +
26464 if (hdr->request_len > BLK_MAX_CDB) {
26465 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
26466 if (!rq->cmd)
26467 return -ENOMEM;
26468 - }
26469 + cmdptr = rq->cmd;
26470 + } else
26471 + cmdptr = tmpcmd;
26472
26473 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
26474 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
26475 hdr->request_len))
26476 return -EFAULT;
26477
26478 + if (cmdptr != rq->cmd)
26479 + memcpy(rq->cmd, cmdptr, hdr->request_len);
26480 +
26481 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
26482 if (blk_verify_command(rq->cmd, has_write_perm))
26483 return -EPERM;
26484 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
26485 index 7b72502..646105c 100644
26486 --- a/block/compat_ioctl.c
26487 +++ b/block/compat_ioctl.c
26488 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
26489 err |= __get_user(f->spec1, &uf->spec1);
26490 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
26491 err |= __get_user(name, &uf->name);
26492 - f->name = compat_ptr(name);
26493 + f->name = (void __force_kernel *)compat_ptr(name);
26494 if (err) {
26495 err = -EFAULT;
26496 goto out;
26497 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
26498 index 688be8a..8a37d98 100644
26499 --- a/block/scsi_ioctl.c
26500 +++ b/block/scsi_ioctl.c
26501 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
26502 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
26503 struct sg_io_hdr *hdr, fmode_t mode)
26504 {
26505 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
26506 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26507 + unsigned char *cmdptr;
26508 +
26509 + if (rq->cmd != rq->__cmd)
26510 + cmdptr = rq->cmd;
26511 + else
26512 + cmdptr = tmpcmd;
26513 +
26514 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
26515 return -EFAULT;
26516 +
26517 + if (cmdptr != rq->cmd)
26518 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
26519 +
26520 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
26521 return -EPERM;
26522
26523 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26524 int err;
26525 unsigned int in_len, out_len, bytes, opcode, cmdlen;
26526 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
26527 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26528 + unsigned char *cmdptr;
26529
26530 if (!sic)
26531 return -EINVAL;
26532 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26533 */
26534 err = -EFAULT;
26535 rq->cmd_len = cmdlen;
26536 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
26537 +
26538 + if (rq->cmd != rq->__cmd)
26539 + cmdptr = rq->cmd;
26540 + else
26541 + cmdptr = tmpcmd;
26542 +
26543 + if (copy_from_user(cmdptr, sic->data, cmdlen))
26544 goto error;
26545
26546 + if (rq->cmd != cmdptr)
26547 + memcpy(rq->cmd, cmdptr, cmdlen);
26548 +
26549 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
26550 goto error;
26551
26552 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
26553 index 671d4d6..5f24030 100644
26554 --- a/crypto/cryptd.c
26555 +++ b/crypto/cryptd.c
26556 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
26557
26558 struct cryptd_blkcipher_request_ctx {
26559 crypto_completion_t complete;
26560 -};
26561 +} __no_const;
26562
26563 struct cryptd_hash_ctx {
26564 struct crypto_shash *child;
26565 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
26566
26567 struct cryptd_aead_request_ctx {
26568 crypto_completion_t complete;
26569 -};
26570 +} __no_const;
26571
26572 static void cryptd_queue_worker(struct work_struct *work);
26573
26574 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
26575 index 5d41894..22021e4 100644
26576 --- a/drivers/acpi/apei/cper.c
26577 +++ b/drivers/acpi/apei/cper.c
26578 @@ -38,12 +38,12 @@
26579 */
26580 u64 cper_next_record_id(void)
26581 {
26582 - static atomic64_t seq;
26583 + static atomic64_unchecked_t seq;
26584
26585 - if (!atomic64_read(&seq))
26586 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
26587 + if (!atomic64_read_unchecked(&seq))
26588 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
26589
26590 - return atomic64_inc_return(&seq);
26591 + return atomic64_inc_return_unchecked(&seq);
26592 }
26593 EXPORT_SYMBOL_GPL(cper_next_record_id);
26594
26595 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
26596 index 6c47ae9..abfdd63 100644
26597 --- a/drivers/acpi/ec_sys.c
26598 +++ b/drivers/acpi/ec_sys.c
26599 @@ -12,6 +12,7 @@
26600 #include <linux/acpi.h>
26601 #include <linux/debugfs.h>
26602 #include <linux/module.h>
26603 +#include <linux/uaccess.h>
26604 #include "internal.h"
26605
26606 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
26607 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26608 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
26609 */
26610 unsigned int size = EC_SPACE_SIZE;
26611 - u8 *data = (u8 *) buf;
26612 + u8 data;
26613 loff_t init_off = *off;
26614 int err = 0;
26615
26616 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26617 size = count;
26618
26619 while (size) {
26620 - err = ec_read(*off, &data[*off - init_off]);
26621 + err = ec_read(*off, &data);
26622 if (err)
26623 return err;
26624 + if (put_user(data, &buf[*off - init_off]))
26625 + return -EFAULT;
26626 *off += 1;
26627 size--;
26628 }
26629 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26630
26631 unsigned int size = count;
26632 loff_t init_off = *off;
26633 - u8 *data = (u8 *) buf;
26634 int err = 0;
26635
26636 if (*off >= EC_SPACE_SIZE)
26637 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26638 }
26639
26640 while (size) {
26641 - u8 byte_write = data[*off - init_off];
26642 + u8 byte_write;
26643 + if (get_user(byte_write, &buf[*off - init_off]))
26644 + return -EFAULT;
26645 err = ec_write(*off, byte_write);
26646 if (err)
26647 return err;
26648 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
26649 index 251c7b62..000462d 100644
26650 --- a/drivers/acpi/proc.c
26651 +++ b/drivers/acpi/proc.c
26652 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
26653 size_t count, loff_t * ppos)
26654 {
26655 struct list_head *node, *next;
26656 - char strbuf[5];
26657 - char str[5] = "";
26658 - unsigned int len = count;
26659 + char strbuf[5] = {0};
26660
26661 - if (len > 4)
26662 - len = 4;
26663 - if (len < 0)
26664 + if (count > 4)
26665 + count = 4;
26666 + if (copy_from_user(strbuf, buffer, count))
26667 return -EFAULT;
26668 -
26669 - if (copy_from_user(strbuf, buffer, len))
26670 - return -EFAULT;
26671 - strbuf[len] = '\0';
26672 - sscanf(strbuf, "%s", str);
26673 + strbuf[count] = '\0';
26674
26675 mutex_lock(&acpi_device_lock);
26676 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
26677 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
26678 if (!dev->wakeup.flags.valid)
26679 continue;
26680
26681 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
26682 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
26683 if (device_can_wakeup(&dev->dev)) {
26684 bool enable = !device_may_wakeup(&dev->dev);
26685 device_set_wakeup_enable(&dev->dev, enable);
26686 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
26687 index 9d7bc9f..a6fc091 100644
26688 --- a/drivers/acpi/processor_driver.c
26689 +++ b/drivers/acpi/processor_driver.c
26690 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
26691 return 0;
26692 #endif
26693
26694 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
26695 + BUG_ON(pr->id >= nr_cpu_ids);
26696
26697 /*
26698 * Buggy BIOS check
26699 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
26700 index c04ad68..0b99473 100644
26701 --- a/drivers/ata/libata-core.c
26702 +++ b/drivers/ata/libata-core.c
26703 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
26704 struct ata_port *ap;
26705 unsigned int tag;
26706
26707 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26708 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26709 ap = qc->ap;
26710
26711 qc->flags = 0;
26712 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
26713 struct ata_port *ap;
26714 struct ata_link *link;
26715
26716 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26717 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26718 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
26719 ap = qc->ap;
26720 link = qc->dev->link;
26721 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26722 return;
26723
26724 spin_lock(&lock);
26725 + pax_open_kernel();
26726
26727 for (cur = ops->inherits; cur; cur = cur->inherits) {
26728 void **inherit = (void **)cur;
26729 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26730 if (IS_ERR(*pp))
26731 *pp = NULL;
26732
26733 - ops->inherits = NULL;
26734 + *(struct ata_port_operations **)&ops->inherits = NULL;
26735
26736 + pax_close_kernel();
26737 spin_unlock(&lock);
26738 }
26739
26740 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
26741 index e8574bb..f9f6a72 100644
26742 --- a/drivers/ata/pata_arasan_cf.c
26743 +++ b/drivers/ata/pata_arasan_cf.c
26744 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
26745 /* Handle platform specific quirks */
26746 if (pdata->quirk) {
26747 if (pdata->quirk & CF_BROKEN_PIO) {
26748 - ap->ops->set_piomode = NULL;
26749 + pax_open_kernel();
26750 + *(void **)&ap->ops->set_piomode = NULL;
26751 + pax_close_kernel();
26752 ap->pio_mask = 0;
26753 }
26754 if (pdata->quirk & CF_BROKEN_MWDMA)
26755 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
26756 index f9b983a..887b9d8 100644
26757 --- a/drivers/atm/adummy.c
26758 +++ b/drivers/atm/adummy.c
26759 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
26760 vcc->pop(vcc, skb);
26761 else
26762 dev_kfree_skb_any(skb);
26763 - atomic_inc(&vcc->stats->tx);
26764 + atomic_inc_unchecked(&vcc->stats->tx);
26765
26766 return 0;
26767 }
26768 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
26769 index f8f41e0..1f987dd 100644
26770 --- a/drivers/atm/ambassador.c
26771 +++ b/drivers/atm/ambassador.c
26772 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
26773 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26774
26775 // VC layer stats
26776 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26777 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26778
26779 // free the descriptor
26780 kfree (tx_descr);
26781 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26782 dump_skb ("<<<", vc, skb);
26783
26784 // VC layer stats
26785 - atomic_inc(&atm_vcc->stats->rx);
26786 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26787 __net_timestamp(skb);
26788 // end of our responsibility
26789 atm_vcc->push (atm_vcc, skb);
26790 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26791 } else {
26792 PRINTK (KERN_INFO, "dropped over-size frame");
26793 // should we count this?
26794 - atomic_inc(&atm_vcc->stats->rx_drop);
26795 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26796 }
26797
26798 } else {
26799 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
26800 }
26801
26802 if (check_area (skb->data, skb->len)) {
26803 - atomic_inc(&atm_vcc->stats->tx_err);
26804 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26805 return -ENOMEM; // ?
26806 }
26807
26808 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
26809 index b22d71c..d6e1049 100644
26810 --- a/drivers/atm/atmtcp.c
26811 +++ b/drivers/atm/atmtcp.c
26812 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26813 if (vcc->pop) vcc->pop(vcc,skb);
26814 else dev_kfree_skb(skb);
26815 if (dev_data) return 0;
26816 - atomic_inc(&vcc->stats->tx_err);
26817 + atomic_inc_unchecked(&vcc->stats->tx_err);
26818 return -ENOLINK;
26819 }
26820 size = skb->len+sizeof(struct atmtcp_hdr);
26821 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26822 if (!new_skb) {
26823 if (vcc->pop) vcc->pop(vcc,skb);
26824 else dev_kfree_skb(skb);
26825 - atomic_inc(&vcc->stats->tx_err);
26826 + atomic_inc_unchecked(&vcc->stats->tx_err);
26827 return -ENOBUFS;
26828 }
26829 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26830 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26831 if (vcc->pop) vcc->pop(vcc,skb);
26832 else dev_kfree_skb(skb);
26833 out_vcc->push(out_vcc,new_skb);
26834 - atomic_inc(&vcc->stats->tx);
26835 - atomic_inc(&out_vcc->stats->rx);
26836 + atomic_inc_unchecked(&vcc->stats->tx);
26837 + atomic_inc_unchecked(&out_vcc->stats->rx);
26838 return 0;
26839 }
26840
26841 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26842 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26843 read_unlock(&vcc_sklist_lock);
26844 if (!out_vcc) {
26845 - atomic_inc(&vcc->stats->tx_err);
26846 + atomic_inc_unchecked(&vcc->stats->tx_err);
26847 goto done;
26848 }
26849 skb_pull(skb,sizeof(struct atmtcp_hdr));
26850 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26851 __net_timestamp(new_skb);
26852 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26853 out_vcc->push(out_vcc,new_skb);
26854 - atomic_inc(&vcc->stats->tx);
26855 - atomic_inc(&out_vcc->stats->rx);
26856 + atomic_inc_unchecked(&vcc->stats->tx);
26857 + atomic_inc_unchecked(&out_vcc->stats->rx);
26858 done:
26859 if (vcc->pop) vcc->pop(vcc,skb);
26860 else dev_kfree_skb(skb);
26861 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
26862 index 956e9ac..133516d 100644
26863 --- a/drivers/atm/eni.c
26864 +++ b/drivers/atm/eni.c
26865 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26866 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26867 vcc->dev->number);
26868 length = 0;
26869 - atomic_inc(&vcc->stats->rx_err);
26870 + atomic_inc_unchecked(&vcc->stats->rx_err);
26871 }
26872 else {
26873 length = ATM_CELL_SIZE-1; /* no HEC */
26874 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26875 size);
26876 }
26877 eff = length = 0;
26878 - atomic_inc(&vcc->stats->rx_err);
26879 + atomic_inc_unchecked(&vcc->stats->rx_err);
26880 }
26881 else {
26882 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26883 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26884 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26885 vcc->dev->number,vcc->vci,length,size << 2,descr);
26886 length = eff = 0;
26887 - atomic_inc(&vcc->stats->rx_err);
26888 + atomic_inc_unchecked(&vcc->stats->rx_err);
26889 }
26890 }
26891 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26892 @@ -771,7 +771,7 @@ rx_dequeued++;
26893 vcc->push(vcc,skb);
26894 pushed++;
26895 }
26896 - atomic_inc(&vcc->stats->rx);
26897 + atomic_inc_unchecked(&vcc->stats->rx);
26898 }
26899 wake_up(&eni_dev->rx_wait);
26900 }
26901 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
26902 PCI_DMA_TODEVICE);
26903 if (vcc->pop) vcc->pop(vcc,skb);
26904 else dev_kfree_skb_irq(skb);
26905 - atomic_inc(&vcc->stats->tx);
26906 + atomic_inc_unchecked(&vcc->stats->tx);
26907 wake_up(&eni_dev->tx_wait);
26908 dma_complete++;
26909 }
26910 @@ -1569,7 +1569,7 @@ tx_complete++;
26911 /*--------------------------------- entries ---------------------------------*/
26912
26913
26914 -static const char *media_name[] __devinitdata = {
26915 +static const char *media_name[] __devinitconst = {
26916 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
26917 "UTP", "05?", "06?", "07?", /* 4- 7 */
26918 "TAXI","09?", "10?", "11?", /* 8-11 */
26919 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
26920 index 5072f8a..fa52520d 100644
26921 --- a/drivers/atm/firestream.c
26922 +++ b/drivers/atm/firestream.c
26923 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
26924 }
26925 }
26926
26927 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26928 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26929
26930 fs_dprintk (FS_DEBUG_TXMEM, "i");
26931 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26932 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26933 #endif
26934 skb_put (skb, qe->p1 & 0xffff);
26935 ATM_SKB(skb)->vcc = atm_vcc;
26936 - atomic_inc(&atm_vcc->stats->rx);
26937 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26938 __net_timestamp(skb);
26939 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26940 atm_vcc->push (atm_vcc, skb);
26941 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26942 kfree (pe);
26943 }
26944 if (atm_vcc)
26945 - atomic_inc(&atm_vcc->stats->rx_drop);
26946 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26947 break;
26948 case 0x1f: /* Reassembly abort: no buffers. */
26949 /* Silently increment error counter. */
26950 if (atm_vcc)
26951 - atomic_inc(&atm_vcc->stats->rx_drop);
26952 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26953 break;
26954 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26955 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26956 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
26957 index 361f5ae..7fc552d 100644
26958 --- a/drivers/atm/fore200e.c
26959 +++ b/drivers/atm/fore200e.c
26960 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
26961 #endif
26962 /* check error condition */
26963 if (*entry->status & STATUS_ERROR)
26964 - atomic_inc(&vcc->stats->tx_err);
26965 + atomic_inc_unchecked(&vcc->stats->tx_err);
26966 else
26967 - atomic_inc(&vcc->stats->tx);
26968 + atomic_inc_unchecked(&vcc->stats->tx);
26969 }
26970 }
26971
26972 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
26973 if (skb == NULL) {
26974 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26975
26976 - atomic_inc(&vcc->stats->rx_drop);
26977 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26978 return -ENOMEM;
26979 }
26980
26981 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
26982
26983 dev_kfree_skb_any(skb);
26984
26985 - atomic_inc(&vcc->stats->rx_drop);
26986 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26987 return -ENOMEM;
26988 }
26989
26990 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26991
26992 vcc->push(vcc, skb);
26993 - atomic_inc(&vcc->stats->rx);
26994 + atomic_inc_unchecked(&vcc->stats->rx);
26995
26996 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26997
26998 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
26999 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27000 fore200e->atm_dev->number,
27001 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27002 - atomic_inc(&vcc->stats->rx_err);
27003 + atomic_inc_unchecked(&vcc->stats->rx_err);
27004 }
27005 }
27006
27007 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27008 goto retry_here;
27009 }
27010
27011 - atomic_inc(&vcc->stats->tx_err);
27012 + atomic_inc_unchecked(&vcc->stats->tx_err);
27013
27014 fore200e->tx_sat++;
27015 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27016 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27017 index 9a51df4..f3bb5f8 100644
27018 --- a/drivers/atm/he.c
27019 +++ b/drivers/atm/he.c
27020 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27021
27022 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27023 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27024 - atomic_inc(&vcc->stats->rx_drop);
27025 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27026 goto return_host_buffers;
27027 }
27028
27029 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27030 RBRQ_LEN_ERR(he_dev->rbrq_head)
27031 ? "LEN_ERR" : "",
27032 vcc->vpi, vcc->vci);
27033 - atomic_inc(&vcc->stats->rx_err);
27034 + atomic_inc_unchecked(&vcc->stats->rx_err);
27035 goto return_host_buffers;
27036 }
27037
27038 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27039 vcc->push(vcc, skb);
27040 spin_lock(&he_dev->global_lock);
27041
27042 - atomic_inc(&vcc->stats->rx);
27043 + atomic_inc_unchecked(&vcc->stats->rx);
27044
27045 return_host_buffers:
27046 ++pdus_assembled;
27047 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27048 tpd->vcc->pop(tpd->vcc, tpd->skb);
27049 else
27050 dev_kfree_skb_any(tpd->skb);
27051 - atomic_inc(&tpd->vcc->stats->tx_err);
27052 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27053 }
27054 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27055 return;
27056 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27057 vcc->pop(vcc, skb);
27058 else
27059 dev_kfree_skb_any(skb);
27060 - atomic_inc(&vcc->stats->tx_err);
27061 + atomic_inc_unchecked(&vcc->stats->tx_err);
27062 return -EINVAL;
27063 }
27064
27065 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27066 vcc->pop(vcc, skb);
27067 else
27068 dev_kfree_skb_any(skb);
27069 - atomic_inc(&vcc->stats->tx_err);
27070 + atomic_inc_unchecked(&vcc->stats->tx_err);
27071 return -EINVAL;
27072 }
27073 #endif
27074 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27075 vcc->pop(vcc, skb);
27076 else
27077 dev_kfree_skb_any(skb);
27078 - atomic_inc(&vcc->stats->tx_err);
27079 + atomic_inc_unchecked(&vcc->stats->tx_err);
27080 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27081 return -ENOMEM;
27082 }
27083 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27084 vcc->pop(vcc, skb);
27085 else
27086 dev_kfree_skb_any(skb);
27087 - atomic_inc(&vcc->stats->tx_err);
27088 + atomic_inc_unchecked(&vcc->stats->tx_err);
27089 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27090 return -ENOMEM;
27091 }
27092 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27093 __enqueue_tpd(he_dev, tpd, cid);
27094 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27095
27096 - atomic_inc(&vcc->stats->tx);
27097 + atomic_inc_unchecked(&vcc->stats->tx);
27098
27099 return 0;
27100 }
27101 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
27102 index b812103..e391a49 100644
27103 --- a/drivers/atm/horizon.c
27104 +++ b/drivers/atm/horizon.c
27105 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
27106 {
27107 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27108 // VC layer stats
27109 - atomic_inc(&vcc->stats->rx);
27110 + atomic_inc_unchecked(&vcc->stats->rx);
27111 __net_timestamp(skb);
27112 // end of our responsibility
27113 vcc->push (vcc, skb);
27114 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
27115 dev->tx_iovec = NULL;
27116
27117 // VC layer stats
27118 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27119 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27120
27121 // free the skb
27122 hrz_kfree_skb (skb);
27123 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
27124 index 1c05212..c28e200 100644
27125 --- a/drivers/atm/idt77252.c
27126 +++ b/drivers/atm/idt77252.c
27127 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
27128 else
27129 dev_kfree_skb(skb);
27130
27131 - atomic_inc(&vcc->stats->tx);
27132 + atomic_inc_unchecked(&vcc->stats->tx);
27133 }
27134
27135 atomic_dec(&scq->used);
27136 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27137 if ((sb = dev_alloc_skb(64)) == NULL) {
27138 printk("%s: Can't allocate buffers for aal0.\n",
27139 card->name);
27140 - atomic_add(i, &vcc->stats->rx_drop);
27141 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27142 break;
27143 }
27144 if (!atm_charge(vcc, sb->truesize)) {
27145 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
27146 card->name);
27147 - atomic_add(i - 1, &vcc->stats->rx_drop);
27148 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
27149 dev_kfree_skb(sb);
27150 break;
27151 }
27152 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27153 ATM_SKB(sb)->vcc = vcc;
27154 __net_timestamp(sb);
27155 vcc->push(vcc, sb);
27156 - atomic_inc(&vcc->stats->rx);
27157 + atomic_inc_unchecked(&vcc->stats->rx);
27158
27159 cell += ATM_CELL_PAYLOAD;
27160 }
27161 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27162 "(CDC: %08x)\n",
27163 card->name, len, rpp->len, readl(SAR_REG_CDC));
27164 recycle_rx_pool_skb(card, rpp);
27165 - atomic_inc(&vcc->stats->rx_err);
27166 + atomic_inc_unchecked(&vcc->stats->rx_err);
27167 return;
27168 }
27169 if (stat & SAR_RSQE_CRC) {
27170 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
27171 recycle_rx_pool_skb(card, rpp);
27172 - atomic_inc(&vcc->stats->rx_err);
27173 + atomic_inc_unchecked(&vcc->stats->rx_err);
27174 return;
27175 }
27176 if (skb_queue_len(&rpp->queue) > 1) {
27177 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27178 RXPRINTK("%s: Can't alloc RX skb.\n",
27179 card->name);
27180 recycle_rx_pool_skb(card, rpp);
27181 - atomic_inc(&vcc->stats->rx_err);
27182 + atomic_inc_unchecked(&vcc->stats->rx_err);
27183 return;
27184 }
27185 if (!atm_charge(vcc, skb->truesize)) {
27186 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27187 __net_timestamp(skb);
27188
27189 vcc->push(vcc, skb);
27190 - atomic_inc(&vcc->stats->rx);
27191 + atomic_inc_unchecked(&vcc->stats->rx);
27192
27193 return;
27194 }
27195 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27196 __net_timestamp(skb);
27197
27198 vcc->push(vcc, skb);
27199 - atomic_inc(&vcc->stats->rx);
27200 + atomic_inc_unchecked(&vcc->stats->rx);
27201
27202 if (skb->truesize > SAR_FB_SIZE_3)
27203 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
27204 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
27205 if (vcc->qos.aal != ATM_AAL0) {
27206 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
27207 card->name, vpi, vci);
27208 - atomic_inc(&vcc->stats->rx_drop);
27209 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27210 goto drop;
27211 }
27212
27213 if ((sb = dev_alloc_skb(64)) == NULL) {
27214 printk("%s: Can't allocate buffers for AAL0.\n",
27215 card->name);
27216 - atomic_inc(&vcc->stats->rx_err);
27217 + atomic_inc_unchecked(&vcc->stats->rx_err);
27218 goto drop;
27219 }
27220
27221 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
27222 ATM_SKB(sb)->vcc = vcc;
27223 __net_timestamp(sb);
27224 vcc->push(vcc, sb);
27225 - atomic_inc(&vcc->stats->rx);
27226 + atomic_inc_unchecked(&vcc->stats->rx);
27227
27228 drop:
27229 skb_pull(queue, 64);
27230 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27231
27232 if (vc == NULL) {
27233 printk("%s: NULL connection in send().\n", card->name);
27234 - atomic_inc(&vcc->stats->tx_err);
27235 + atomic_inc_unchecked(&vcc->stats->tx_err);
27236 dev_kfree_skb(skb);
27237 return -EINVAL;
27238 }
27239 if (!test_bit(VCF_TX, &vc->flags)) {
27240 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
27241 - atomic_inc(&vcc->stats->tx_err);
27242 + atomic_inc_unchecked(&vcc->stats->tx_err);
27243 dev_kfree_skb(skb);
27244 return -EINVAL;
27245 }
27246 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27247 break;
27248 default:
27249 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
27250 - atomic_inc(&vcc->stats->tx_err);
27251 + atomic_inc_unchecked(&vcc->stats->tx_err);
27252 dev_kfree_skb(skb);
27253 return -EINVAL;
27254 }
27255
27256 if (skb_shinfo(skb)->nr_frags != 0) {
27257 printk("%s: No scatter-gather yet.\n", card->name);
27258 - atomic_inc(&vcc->stats->tx_err);
27259 + atomic_inc_unchecked(&vcc->stats->tx_err);
27260 dev_kfree_skb(skb);
27261 return -EINVAL;
27262 }
27263 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27264
27265 err = queue_skb(card, vc, skb, oam);
27266 if (err) {
27267 - atomic_inc(&vcc->stats->tx_err);
27268 + atomic_inc_unchecked(&vcc->stats->tx_err);
27269 dev_kfree_skb(skb);
27270 return err;
27271 }
27272 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
27273 skb = dev_alloc_skb(64);
27274 if (!skb) {
27275 printk("%s: Out of memory in send_oam().\n", card->name);
27276 - atomic_inc(&vcc->stats->tx_err);
27277 + atomic_inc_unchecked(&vcc->stats->tx_err);
27278 return -ENOMEM;
27279 }
27280 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
27281 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
27282 index 3d0c2b0..45441fa 100644
27283 --- a/drivers/atm/iphase.c
27284 +++ b/drivers/atm/iphase.c
27285 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
27286 status = (u_short) (buf_desc_ptr->desc_mode);
27287 if (status & (RX_CER | RX_PTE | RX_OFL))
27288 {
27289 - atomic_inc(&vcc->stats->rx_err);
27290 + atomic_inc_unchecked(&vcc->stats->rx_err);
27291 IF_ERR(printk("IA: bad packet, dropping it");)
27292 if (status & RX_CER) {
27293 IF_ERR(printk(" cause: packet CRC error\n");)
27294 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
27295 len = dma_addr - buf_addr;
27296 if (len > iadev->rx_buf_sz) {
27297 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
27298 - atomic_inc(&vcc->stats->rx_err);
27299 + atomic_inc_unchecked(&vcc->stats->rx_err);
27300 goto out_free_desc;
27301 }
27302
27303 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27304 ia_vcc = INPH_IA_VCC(vcc);
27305 if (ia_vcc == NULL)
27306 {
27307 - atomic_inc(&vcc->stats->rx_err);
27308 + atomic_inc_unchecked(&vcc->stats->rx_err);
27309 dev_kfree_skb_any(skb);
27310 atm_return(vcc, atm_guess_pdu2truesize(len));
27311 goto INCR_DLE;
27312 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27313 if ((length > iadev->rx_buf_sz) || (length >
27314 (skb->len - sizeof(struct cpcs_trailer))))
27315 {
27316 - atomic_inc(&vcc->stats->rx_err);
27317 + atomic_inc_unchecked(&vcc->stats->rx_err);
27318 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
27319 length, skb->len);)
27320 dev_kfree_skb_any(skb);
27321 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27322
27323 IF_RX(printk("rx_dle_intr: skb push");)
27324 vcc->push(vcc,skb);
27325 - atomic_inc(&vcc->stats->rx);
27326 + atomic_inc_unchecked(&vcc->stats->rx);
27327 iadev->rx_pkt_cnt++;
27328 }
27329 INCR_DLE:
27330 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
27331 {
27332 struct k_sonet_stats *stats;
27333 stats = &PRIV(_ia_dev[board])->sonet_stats;
27334 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
27335 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
27336 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
27337 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
27338 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
27339 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
27340 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
27341 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
27342 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
27343 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
27344 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
27345 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
27346 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
27347 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
27348 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
27349 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
27350 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
27351 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
27352 }
27353 ia_cmds.status = 0;
27354 break;
27355 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27356 if ((desc == 0) || (desc > iadev->num_tx_desc))
27357 {
27358 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
27359 - atomic_inc(&vcc->stats->tx);
27360 + atomic_inc_unchecked(&vcc->stats->tx);
27361 if (vcc->pop)
27362 vcc->pop(vcc, skb);
27363 else
27364 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27365 ATM_DESC(skb) = vcc->vci;
27366 skb_queue_tail(&iadev->tx_dma_q, skb);
27367
27368 - atomic_inc(&vcc->stats->tx);
27369 + atomic_inc_unchecked(&vcc->stats->tx);
27370 iadev->tx_pkt_cnt++;
27371 /* Increment transaction counter */
27372 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
27373
27374 #if 0
27375 /* add flow control logic */
27376 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
27377 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
27378 if (iavcc->vc_desc_cnt > 10) {
27379 vcc->tx_quota = vcc->tx_quota * 3 / 4;
27380 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
27381 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
27382 index f556969..0da15eb 100644
27383 --- a/drivers/atm/lanai.c
27384 +++ b/drivers/atm/lanai.c
27385 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
27386 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
27387 lanai_endtx(lanai, lvcc);
27388 lanai_free_skb(lvcc->tx.atmvcc, skb);
27389 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
27390 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
27391 }
27392
27393 /* Try to fill the buffer - don't call unless there is backlog */
27394 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
27395 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
27396 __net_timestamp(skb);
27397 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
27398 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
27399 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
27400 out:
27401 lvcc->rx.buf.ptr = end;
27402 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
27403 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27404 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
27405 "vcc %d\n", lanai->number, (unsigned int) s, vci);
27406 lanai->stats.service_rxnotaal5++;
27407 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27408 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27409 return 0;
27410 }
27411 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
27412 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27413 int bytes;
27414 read_unlock(&vcc_sklist_lock);
27415 DPRINTK("got trashed rx pdu on vci %d\n", vci);
27416 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27417 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27418 lvcc->stats.x.aal5.service_trash++;
27419 bytes = (SERVICE_GET_END(s) * 16) -
27420 (((unsigned long) lvcc->rx.buf.ptr) -
27421 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27422 }
27423 if (s & SERVICE_STREAM) {
27424 read_unlock(&vcc_sklist_lock);
27425 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27426 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27427 lvcc->stats.x.aal5.service_stream++;
27428 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
27429 "PDU on VCI %d!\n", lanai->number, vci);
27430 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27431 return 0;
27432 }
27433 DPRINTK("got rx crc error on vci %d\n", vci);
27434 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27435 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27436 lvcc->stats.x.aal5.service_rxcrc++;
27437 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
27438 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
27439 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
27440 index 1c70c45..300718d 100644
27441 --- a/drivers/atm/nicstar.c
27442 +++ b/drivers/atm/nicstar.c
27443 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27444 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
27445 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
27446 card->index);
27447 - atomic_inc(&vcc->stats->tx_err);
27448 + atomic_inc_unchecked(&vcc->stats->tx_err);
27449 dev_kfree_skb_any(skb);
27450 return -EINVAL;
27451 }
27452 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27453 if (!vc->tx) {
27454 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
27455 card->index);
27456 - atomic_inc(&vcc->stats->tx_err);
27457 + atomic_inc_unchecked(&vcc->stats->tx_err);
27458 dev_kfree_skb_any(skb);
27459 return -EINVAL;
27460 }
27461 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27462 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
27463 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
27464 card->index);
27465 - atomic_inc(&vcc->stats->tx_err);
27466 + atomic_inc_unchecked(&vcc->stats->tx_err);
27467 dev_kfree_skb_any(skb);
27468 return -EINVAL;
27469 }
27470
27471 if (skb_shinfo(skb)->nr_frags != 0) {
27472 printk("nicstar%d: No scatter-gather yet.\n", card->index);
27473 - atomic_inc(&vcc->stats->tx_err);
27474 + atomic_inc_unchecked(&vcc->stats->tx_err);
27475 dev_kfree_skb_any(skb);
27476 return -EINVAL;
27477 }
27478 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27479 }
27480
27481 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
27482 - atomic_inc(&vcc->stats->tx_err);
27483 + atomic_inc_unchecked(&vcc->stats->tx_err);
27484 dev_kfree_skb_any(skb);
27485 return -EIO;
27486 }
27487 - atomic_inc(&vcc->stats->tx);
27488 + atomic_inc_unchecked(&vcc->stats->tx);
27489
27490 return 0;
27491 }
27492 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27493 printk
27494 ("nicstar%d: Can't allocate buffers for aal0.\n",
27495 card->index);
27496 - atomic_add(i, &vcc->stats->rx_drop);
27497 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27498 break;
27499 }
27500 if (!atm_charge(vcc, sb->truesize)) {
27501 RXPRINTK
27502 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
27503 card->index);
27504 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27505 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27506 dev_kfree_skb_any(sb);
27507 break;
27508 }
27509 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27510 ATM_SKB(sb)->vcc = vcc;
27511 __net_timestamp(sb);
27512 vcc->push(vcc, sb);
27513 - atomic_inc(&vcc->stats->rx);
27514 + atomic_inc_unchecked(&vcc->stats->rx);
27515 cell += ATM_CELL_PAYLOAD;
27516 }
27517
27518 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27519 if (iovb == NULL) {
27520 printk("nicstar%d: Out of iovec buffers.\n",
27521 card->index);
27522 - atomic_inc(&vcc->stats->rx_drop);
27523 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27524 recycle_rx_buf(card, skb);
27525 return;
27526 }
27527 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27528 small or large buffer itself. */
27529 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
27530 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
27531 - atomic_inc(&vcc->stats->rx_err);
27532 + atomic_inc_unchecked(&vcc->stats->rx_err);
27533 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27534 NS_MAX_IOVECS);
27535 NS_PRV_IOVCNT(iovb) = 0;
27536 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27537 ("nicstar%d: Expected a small buffer, and this is not one.\n",
27538 card->index);
27539 which_list(card, skb);
27540 - atomic_inc(&vcc->stats->rx_err);
27541 + atomic_inc_unchecked(&vcc->stats->rx_err);
27542 recycle_rx_buf(card, skb);
27543 vc->rx_iov = NULL;
27544 recycle_iov_buf(card, iovb);
27545 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27546 ("nicstar%d: Expected a large buffer, and this is not one.\n",
27547 card->index);
27548 which_list(card, skb);
27549 - atomic_inc(&vcc->stats->rx_err);
27550 + atomic_inc_unchecked(&vcc->stats->rx_err);
27551 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27552 NS_PRV_IOVCNT(iovb));
27553 vc->rx_iov = NULL;
27554 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27555 printk(" - PDU size mismatch.\n");
27556 else
27557 printk(".\n");
27558 - atomic_inc(&vcc->stats->rx_err);
27559 + atomic_inc_unchecked(&vcc->stats->rx_err);
27560 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27561 NS_PRV_IOVCNT(iovb));
27562 vc->rx_iov = NULL;
27563 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27564 /* skb points to a small buffer */
27565 if (!atm_charge(vcc, skb->truesize)) {
27566 push_rxbufs(card, skb);
27567 - atomic_inc(&vcc->stats->rx_drop);
27568 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27569 } else {
27570 skb_put(skb, len);
27571 dequeue_sm_buf(card, skb);
27572 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27573 ATM_SKB(skb)->vcc = vcc;
27574 __net_timestamp(skb);
27575 vcc->push(vcc, skb);
27576 - atomic_inc(&vcc->stats->rx);
27577 + atomic_inc_unchecked(&vcc->stats->rx);
27578 }
27579 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
27580 struct sk_buff *sb;
27581 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27582 if (len <= NS_SMBUFSIZE) {
27583 if (!atm_charge(vcc, sb->truesize)) {
27584 push_rxbufs(card, sb);
27585 - atomic_inc(&vcc->stats->rx_drop);
27586 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27587 } else {
27588 skb_put(sb, len);
27589 dequeue_sm_buf(card, sb);
27590 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27591 ATM_SKB(sb)->vcc = vcc;
27592 __net_timestamp(sb);
27593 vcc->push(vcc, sb);
27594 - atomic_inc(&vcc->stats->rx);
27595 + atomic_inc_unchecked(&vcc->stats->rx);
27596 }
27597
27598 push_rxbufs(card, skb);
27599 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27600
27601 if (!atm_charge(vcc, skb->truesize)) {
27602 push_rxbufs(card, skb);
27603 - atomic_inc(&vcc->stats->rx_drop);
27604 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27605 } else {
27606 dequeue_lg_buf(card, skb);
27607 #ifdef NS_USE_DESTRUCTORS
27608 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27609 ATM_SKB(skb)->vcc = vcc;
27610 __net_timestamp(skb);
27611 vcc->push(vcc, skb);
27612 - atomic_inc(&vcc->stats->rx);
27613 + atomic_inc_unchecked(&vcc->stats->rx);
27614 }
27615
27616 push_rxbufs(card, sb);
27617 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27618 printk
27619 ("nicstar%d: Out of huge buffers.\n",
27620 card->index);
27621 - atomic_inc(&vcc->stats->rx_drop);
27622 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27623 recycle_iovec_rx_bufs(card,
27624 (struct iovec *)
27625 iovb->data,
27626 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27627 card->hbpool.count++;
27628 } else
27629 dev_kfree_skb_any(hb);
27630 - atomic_inc(&vcc->stats->rx_drop);
27631 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27632 } else {
27633 /* Copy the small buffer to the huge buffer */
27634 sb = (struct sk_buff *)iov->iov_base;
27635 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27636 #endif /* NS_USE_DESTRUCTORS */
27637 __net_timestamp(hb);
27638 vcc->push(vcc, hb);
27639 - atomic_inc(&vcc->stats->rx);
27640 + atomic_inc_unchecked(&vcc->stats->rx);
27641 }
27642 }
27643
27644 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
27645 index 5d1d076..12fbca4 100644
27646 --- a/drivers/atm/solos-pci.c
27647 +++ b/drivers/atm/solos-pci.c
27648 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
27649 }
27650 atm_charge(vcc, skb->truesize);
27651 vcc->push(vcc, skb);
27652 - atomic_inc(&vcc->stats->rx);
27653 + atomic_inc_unchecked(&vcc->stats->rx);
27654 break;
27655
27656 case PKT_STATUS:
27657 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
27658 vcc = SKB_CB(oldskb)->vcc;
27659
27660 if (vcc) {
27661 - atomic_inc(&vcc->stats->tx);
27662 + atomic_inc_unchecked(&vcc->stats->tx);
27663 solos_pop(vcc, oldskb);
27664 } else
27665 dev_kfree_skb_irq(oldskb);
27666 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
27667 index 90f1ccc..04c4a1e 100644
27668 --- a/drivers/atm/suni.c
27669 +++ b/drivers/atm/suni.c
27670 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27671
27672
27673 #define ADD_LIMITED(s,v) \
27674 - atomic_add((v),&stats->s); \
27675 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27676 + atomic_add_unchecked((v),&stats->s); \
27677 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27678
27679
27680 static void suni_hz(unsigned long from_timer)
27681 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
27682 index 5120a96..e2572bd 100644
27683 --- a/drivers/atm/uPD98402.c
27684 +++ b/drivers/atm/uPD98402.c
27685 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
27686 struct sonet_stats tmp;
27687 int error = 0;
27688
27689 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27690 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27691 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27692 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27693 if (zero && !error) {
27694 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
27695
27696
27697 #define ADD_LIMITED(s,v) \
27698 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27699 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27700 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27701 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27702 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27703 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27704
27705
27706 static void stat_event(struct atm_dev *dev)
27707 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
27708 if (reason & uPD98402_INT_PFM) stat_event(dev);
27709 if (reason & uPD98402_INT_PCO) {
27710 (void) GET(PCOCR); /* clear interrupt cause */
27711 - atomic_add(GET(HECCT),
27712 + atomic_add_unchecked(GET(HECCT),
27713 &PRIV(dev)->sonet_stats.uncorr_hcs);
27714 }
27715 if ((reason & uPD98402_INT_RFO) &&
27716 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
27717 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27718 uPD98402_INT_LOS),PIMR); /* enable them */
27719 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27720 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27721 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27722 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27723 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27724 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27725 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27726 return 0;
27727 }
27728
27729 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
27730 index d889f56..17eb71e 100644
27731 --- a/drivers/atm/zatm.c
27732 +++ b/drivers/atm/zatm.c
27733 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27734 }
27735 if (!size) {
27736 dev_kfree_skb_irq(skb);
27737 - if (vcc) atomic_inc(&vcc->stats->rx_err);
27738 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27739 continue;
27740 }
27741 if (!atm_charge(vcc,skb->truesize)) {
27742 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27743 skb->len = size;
27744 ATM_SKB(skb)->vcc = vcc;
27745 vcc->push(vcc,skb);
27746 - atomic_inc(&vcc->stats->rx);
27747 + atomic_inc_unchecked(&vcc->stats->rx);
27748 }
27749 zout(pos & 0xffff,MTA(mbx));
27750 #if 0 /* probably a stupid idea */
27751 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
27752 skb_queue_head(&zatm_vcc->backlog,skb);
27753 break;
27754 }
27755 - atomic_inc(&vcc->stats->tx);
27756 + atomic_inc_unchecked(&vcc->stats->tx);
27757 wake_up(&zatm_vcc->tx_wait);
27758 }
27759
27760 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
27761 index a4760e0..51283cf 100644
27762 --- a/drivers/base/devtmpfs.c
27763 +++ b/drivers/base/devtmpfs.c
27764 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
27765 if (!thread)
27766 return 0;
27767
27768 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
27769 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
27770 if (err)
27771 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
27772 else
27773 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
27774 index caf995f..6f76697 100644
27775 --- a/drivers/base/power/wakeup.c
27776 +++ b/drivers/base/power/wakeup.c
27777 @@ -30,14 +30,14 @@ bool events_check_enabled;
27778 * They need to be modified together atomically, so it's better to use one
27779 * atomic variable to hold them both.
27780 */
27781 -static atomic_t combined_event_count = ATOMIC_INIT(0);
27782 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
27783
27784 #define IN_PROGRESS_BITS (sizeof(int) * 4)
27785 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
27786
27787 static void split_counters(unsigned int *cnt, unsigned int *inpr)
27788 {
27789 - unsigned int comb = atomic_read(&combined_event_count);
27790 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
27791
27792 *cnt = (comb >> IN_PROGRESS_BITS);
27793 *inpr = comb & MAX_IN_PROGRESS;
27794 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
27795 ws->last_time = ktime_get();
27796
27797 /* Increment the counter of events in progress. */
27798 - atomic_inc(&combined_event_count);
27799 + atomic_inc_unchecked(&combined_event_count);
27800 }
27801
27802 /**
27803 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
27804 * Increment the counter of registered wakeup events and decrement the
27805 * couter of wakeup events in progress simultaneously.
27806 */
27807 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
27808 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
27809 }
27810
27811 /**
27812 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
27813 index b0f553b..77b928b 100644
27814 --- a/drivers/block/cciss.c
27815 +++ b/drivers/block/cciss.c
27816 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
27817 int err;
27818 u32 cp;
27819
27820 + memset(&arg64, 0, sizeof(arg64));
27821 +
27822 err = 0;
27823 err |=
27824 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27825 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
27826 while (!list_empty(&h->reqQ)) {
27827 c = list_entry(h->reqQ.next, CommandList_struct, list);
27828 /* can't do anything if fifo is full */
27829 - if ((h->access.fifo_full(h))) {
27830 + if ((h->access->fifo_full(h))) {
27831 dev_warn(&h->pdev->dev, "fifo full\n");
27832 break;
27833 }
27834 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
27835 h->Qdepth--;
27836
27837 /* Tell the controller execute command */
27838 - h->access.submit_command(h, c);
27839 + h->access->submit_command(h, c);
27840
27841 /* Put job onto the completed Q */
27842 addQ(&h->cmpQ, c);
27843 @@ -3443,17 +3445,17 @@ startio:
27844
27845 static inline unsigned long get_next_completion(ctlr_info_t *h)
27846 {
27847 - return h->access.command_completed(h);
27848 + return h->access->command_completed(h);
27849 }
27850
27851 static inline int interrupt_pending(ctlr_info_t *h)
27852 {
27853 - return h->access.intr_pending(h);
27854 + return h->access->intr_pending(h);
27855 }
27856
27857 static inline long interrupt_not_for_us(ctlr_info_t *h)
27858 {
27859 - return ((h->access.intr_pending(h) == 0) ||
27860 + return ((h->access->intr_pending(h) == 0) ||
27861 (h->interrupts_enabled == 0));
27862 }
27863
27864 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
27865 u32 a;
27866
27867 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
27868 - return h->access.command_completed(h);
27869 + return h->access->command_completed(h);
27870
27871 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
27872 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
27873 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
27874 trans_support & CFGTBL_Trans_use_short_tags);
27875
27876 /* Change the access methods to the performant access methods */
27877 - h->access = SA5_performant_access;
27878 + h->access = &SA5_performant_access;
27879 h->transMethod = CFGTBL_Trans_Performant;
27880
27881 return;
27882 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
27883 if (prod_index < 0)
27884 return -ENODEV;
27885 h->product_name = products[prod_index].product_name;
27886 - h->access = *(products[prod_index].access);
27887 + h->access = products[prod_index].access;
27888
27889 if (cciss_board_disabled(h)) {
27890 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
27891 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
27892 }
27893
27894 /* make sure the board interrupts are off */
27895 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27896 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27897 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
27898 if (rc)
27899 goto clean2;
27900 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
27901 * fake ones to scoop up any residual completions.
27902 */
27903 spin_lock_irqsave(&h->lock, flags);
27904 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27905 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27906 spin_unlock_irqrestore(&h->lock, flags);
27907 free_irq(h->intr[h->intr_mode], h);
27908 rc = cciss_request_irq(h, cciss_msix_discard_completions,
27909 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
27910 dev_info(&h->pdev->dev, "Board READY.\n");
27911 dev_info(&h->pdev->dev,
27912 "Waiting for stale completions to drain.\n");
27913 - h->access.set_intr_mask(h, CCISS_INTR_ON);
27914 + h->access->set_intr_mask(h, CCISS_INTR_ON);
27915 msleep(10000);
27916 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27917 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27918
27919 rc = controller_reset_failed(h->cfgtable);
27920 if (rc)
27921 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
27922 cciss_scsi_setup(h);
27923
27924 /* Turn the interrupts on so we can service requests */
27925 - h->access.set_intr_mask(h, CCISS_INTR_ON);
27926 + h->access->set_intr_mask(h, CCISS_INTR_ON);
27927
27928 /* Get the firmware version */
27929 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27930 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
27931 kfree(flush_buf);
27932 if (return_code != IO_OK)
27933 dev_warn(&h->pdev->dev, "Error flushing cache\n");
27934 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27935 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27936 free_irq(h->intr[h->intr_mode], h);
27937 }
27938
27939 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
27940 index 7fda30e..eb5dfe0 100644
27941 --- a/drivers/block/cciss.h
27942 +++ b/drivers/block/cciss.h
27943 @@ -101,7 +101,7 @@ struct ctlr_info
27944 /* information about each logical volume */
27945 drive_info_struct *drv[CISS_MAX_LUN];
27946
27947 - struct access_method access;
27948 + struct access_method *access;
27949
27950 /* queue and queue Info */
27951 struct list_head reqQ;
27952 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
27953 index 9125bbe..eede5c8 100644
27954 --- a/drivers/block/cpqarray.c
27955 +++ b/drivers/block/cpqarray.c
27956 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
27957 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27958 goto Enomem4;
27959 }
27960 - hba[i]->access.set_intr_mask(hba[i], 0);
27961 + hba[i]->access->set_intr_mask(hba[i], 0);
27962 if (request_irq(hba[i]->intr, do_ida_intr,
27963 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27964 {
27965 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
27966 add_timer(&hba[i]->timer);
27967
27968 /* Enable IRQ now that spinlock and rate limit timer are set up */
27969 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27970 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27971
27972 for(j=0; j<NWD; j++) {
27973 struct gendisk *disk = ida_gendisk[i][j];
27974 @@ -694,7 +694,7 @@ DBGINFO(
27975 for(i=0; i<NR_PRODUCTS; i++) {
27976 if (board_id == products[i].board_id) {
27977 c->product_name = products[i].product_name;
27978 - c->access = *(products[i].access);
27979 + c->access = products[i].access;
27980 break;
27981 }
27982 }
27983 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
27984 hba[ctlr]->intr = intr;
27985 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27986 hba[ctlr]->product_name = products[j].product_name;
27987 - hba[ctlr]->access = *(products[j].access);
27988 + hba[ctlr]->access = products[j].access;
27989 hba[ctlr]->ctlr = ctlr;
27990 hba[ctlr]->board_id = board_id;
27991 hba[ctlr]->pci_dev = NULL; /* not PCI */
27992 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
27993
27994 while((c = h->reqQ) != NULL) {
27995 /* Can't do anything if we're busy */
27996 - if (h->access.fifo_full(h) == 0)
27997 + if (h->access->fifo_full(h) == 0)
27998 return;
27999
28000 /* Get the first entry from the request Q */
28001 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28002 h->Qdepth--;
28003
28004 /* Tell the controller to do our bidding */
28005 - h->access.submit_command(h, c);
28006 + h->access->submit_command(h, c);
28007
28008 /* Get onto the completion Q */
28009 addQ(&h->cmpQ, c);
28010 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28011 unsigned long flags;
28012 __u32 a,a1;
28013
28014 - istat = h->access.intr_pending(h);
28015 + istat = h->access->intr_pending(h);
28016 /* Is this interrupt for us? */
28017 if (istat == 0)
28018 return IRQ_NONE;
28019 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28020 */
28021 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28022 if (istat & FIFO_NOT_EMPTY) {
28023 - while((a = h->access.command_completed(h))) {
28024 + while((a = h->access->command_completed(h))) {
28025 a1 = a; a &= ~3;
28026 if ((c = h->cmpQ) == NULL)
28027 {
28028 @@ -1449,11 +1449,11 @@ static int sendcmd(
28029 /*
28030 * Disable interrupt
28031 */
28032 - info_p->access.set_intr_mask(info_p, 0);
28033 + info_p->access->set_intr_mask(info_p, 0);
28034 /* Make sure there is room in the command FIFO */
28035 /* Actually it should be completely empty at this time. */
28036 for (i = 200000; i > 0; i--) {
28037 - temp = info_p->access.fifo_full(info_p);
28038 + temp = info_p->access->fifo_full(info_p);
28039 if (temp != 0) {
28040 break;
28041 }
28042 @@ -1466,7 +1466,7 @@ DBG(
28043 /*
28044 * Send the cmd
28045 */
28046 - info_p->access.submit_command(info_p, c);
28047 + info_p->access->submit_command(info_p, c);
28048 complete = pollcomplete(ctlr);
28049
28050 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28051 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28052 * we check the new geometry. Then turn interrupts back on when
28053 * we're done.
28054 */
28055 - host->access.set_intr_mask(host, 0);
28056 + host->access->set_intr_mask(host, 0);
28057 getgeometry(ctlr);
28058 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28059 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28060
28061 for(i=0; i<NWD; i++) {
28062 struct gendisk *disk = ida_gendisk[ctlr][i];
28063 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28064 /* Wait (up to 2 seconds) for a command to complete */
28065
28066 for (i = 200000; i > 0; i--) {
28067 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
28068 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
28069 if (done == 0) {
28070 udelay(10); /* a short fixed delay */
28071 } else
28072 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28073 index be73e9d..7fbf140 100644
28074 --- a/drivers/block/cpqarray.h
28075 +++ b/drivers/block/cpqarray.h
28076 @@ -99,7 +99,7 @@ struct ctlr_info {
28077 drv_info_t drv[NWD];
28078 struct proc_dir_entry *proc;
28079
28080 - struct access_method access;
28081 + struct access_method *access;
28082
28083 cmdlist_t *reqQ;
28084 cmdlist_t *cmpQ;
28085 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
28086 index 9cf2035..bffca95 100644
28087 --- a/drivers/block/drbd/drbd_int.h
28088 +++ b/drivers/block/drbd/drbd_int.h
28089 @@ -736,7 +736,7 @@ struct drbd_request;
28090 struct drbd_epoch {
28091 struct list_head list;
28092 unsigned int barrier_nr;
28093 - atomic_t epoch_size; /* increased on every request added. */
28094 + atomic_unchecked_t epoch_size; /* increased on every request added. */
28095 atomic_t active; /* increased on every req. added, and dec on every finished. */
28096 unsigned long flags;
28097 };
28098 @@ -1108,7 +1108,7 @@ struct drbd_conf {
28099 void *int_dig_in;
28100 void *int_dig_vv;
28101 wait_queue_head_t seq_wait;
28102 - atomic_t packet_seq;
28103 + atomic_unchecked_t packet_seq;
28104 unsigned int peer_seq;
28105 spinlock_t peer_seq_lock;
28106 unsigned int minor;
28107 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
28108
28109 static inline void drbd_tcp_cork(struct socket *sock)
28110 {
28111 - int __user val = 1;
28112 + int val = 1;
28113 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28114 - (char __user *)&val, sizeof(val));
28115 + (char __force_user *)&val, sizeof(val));
28116 }
28117
28118 static inline void drbd_tcp_uncork(struct socket *sock)
28119 {
28120 - int __user val = 0;
28121 + int val = 0;
28122 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28123 - (char __user *)&val, sizeof(val));
28124 + (char __force_user *)&val, sizeof(val));
28125 }
28126
28127 static inline void drbd_tcp_nodelay(struct socket *sock)
28128 {
28129 - int __user val = 1;
28130 + int val = 1;
28131 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
28132 - (char __user *)&val, sizeof(val));
28133 + (char __force_user *)&val, sizeof(val));
28134 }
28135
28136 static inline void drbd_tcp_quickack(struct socket *sock)
28137 {
28138 - int __user val = 2;
28139 + int val = 2;
28140 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
28141 - (char __user *)&val, sizeof(val));
28142 + (char __force_user *)&val, sizeof(val));
28143 }
28144
28145 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
28146 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
28147 index 0358e55..bc33689 100644
28148 --- a/drivers/block/drbd/drbd_main.c
28149 +++ b/drivers/block/drbd/drbd_main.c
28150 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
28151 p.sector = sector;
28152 p.block_id = block_id;
28153 p.blksize = blksize;
28154 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
28155 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
28156
28157 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
28158 return false;
28159 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
28160 p.sector = cpu_to_be64(req->sector);
28161 p.block_id = (unsigned long)req;
28162 p.seq_num = cpu_to_be32(req->seq_num =
28163 - atomic_add_return(1, &mdev->packet_seq));
28164 + atomic_add_return_unchecked(1, &mdev->packet_seq));
28165
28166 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
28167
28168 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
28169 atomic_set(&mdev->unacked_cnt, 0);
28170 atomic_set(&mdev->local_cnt, 0);
28171 atomic_set(&mdev->net_cnt, 0);
28172 - atomic_set(&mdev->packet_seq, 0);
28173 + atomic_set_unchecked(&mdev->packet_seq, 0);
28174 atomic_set(&mdev->pp_in_use, 0);
28175 atomic_set(&mdev->pp_in_use_by_net, 0);
28176 atomic_set(&mdev->rs_sect_in, 0);
28177 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
28178 mdev->receiver.t_state);
28179
28180 /* no need to lock it, I'm the only thread alive */
28181 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
28182 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
28183 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
28184 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
28185 mdev->al_writ_cnt =
28186 mdev->bm_writ_cnt =
28187 mdev->read_cnt =
28188 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
28189 index af2a250..219c74b 100644
28190 --- a/drivers/block/drbd/drbd_nl.c
28191 +++ b/drivers/block/drbd/drbd_nl.c
28192 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
28193 module_put(THIS_MODULE);
28194 }
28195
28196 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28197 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28198
28199 static unsigned short *
28200 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
28201 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
28202 cn_reply->id.idx = CN_IDX_DRBD;
28203 cn_reply->id.val = CN_VAL_DRBD;
28204
28205 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28206 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28207 cn_reply->ack = 0; /* not used here. */
28208 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28209 (int)((char *)tl - (char *)reply->tag_list);
28210 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
28211 cn_reply->id.idx = CN_IDX_DRBD;
28212 cn_reply->id.val = CN_VAL_DRBD;
28213
28214 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28215 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28216 cn_reply->ack = 0; /* not used here. */
28217 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28218 (int)((char *)tl - (char *)reply->tag_list);
28219 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
28220 cn_reply->id.idx = CN_IDX_DRBD;
28221 cn_reply->id.val = CN_VAL_DRBD;
28222
28223 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
28224 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
28225 cn_reply->ack = 0; // not used here.
28226 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28227 (int)((char*)tl - (char*)reply->tag_list);
28228 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
28229 cn_reply->id.idx = CN_IDX_DRBD;
28230 cn_reply->id.val = CN_VAL_DRBD;
28231
28232 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28233 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28234 cn_reply->ack = 0; /* not used here. */
28235 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28236 (int)((char *)tl - (char *)reply->tag_list);
28237 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
28238 index 43beaca..4a5b1dd 100644
28239 --- a/drivers/block/drbd/drbd_receiver.c
28240 +++ b/drivers/block/drbd/drbd_receiver.c
28241 @@ -894,7 +894,7 @@ retry:
28242 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
28243 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
28244
28245 - atomic_set(&mdev->packet_seq, 0);
28246 + atomic_set_unchecked(&mdev->packet_seq, 0);
28247 mdev->peer_seq = 0;
28248
28249 drbd_thread_start(&mdev->asender);
28250 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28251 do {
28252 next_epoch = NULL;
28253
28254 - epoch_size = atomic_read(&epoch->epoch_size);
28255 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
28256
28257 switch (ev & ~EV_CLEANUP) {
28258 case EV_PUT:
28259 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28260 rv = FE_DESTROYED;
28261 } else {
28262 epoch->flags = 0;
28263 - atomic_set(&epoch->epoch_size, 0);
28264 + atomic_set_unchecked(&epoch->epoch_size, 0);
28265 /* atomic_set(&epoch->active, 0); is already zero */
28266 if (rv == FE_STILL_LIVE)
28267 rv = FE_RECYCLED;
28268 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28269 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
28270 drbd_flush(mdev);
28271
28272 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28273 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28274 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
28275 if (epoch)
28276 break;
28277 }
28278
28279 epoch = mdev->current_epoch;
28280 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
28281 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
28282
28283 D_ASSERT(atomic_read(&epoch->active) == 0);
28284 D_ASSERT(epoch->flags == 0);
28285 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28286 }
28287
28288 epoch->flags = 0;
28289 - atomic_set(&epoch->epoch_size, 0);
28290 + atomic_set_unchecked(&epoch->epoch_size, 0);
28291 atomic_set(&epoch->active, 0);
28292
28293 spin_lock(&mdev->epoch_lock);
28294 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28295 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28296 list_add(&epoch->list, &mdev->current_epoch->list);
28297 mdev->current_epoch = epoch;
28298 mdev->epochs++;
28299 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28300 spin_unlock(&mdev->peer_seq_lock);
28301
28302 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
28303 - atomic_inc(&mdev->current_epoch->epoch_size);
28304 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
28305 return drbd_drain_block(mdev, data_size);
28306 }
28307
28308 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28309
28310 spin_lock(&mdev->epoch_lock);
28311 e->epoch = mdev->current_epoch;
28312 - atomic_inc(&e->epoch->epoch_size);
28313 + atomic_inc_unchecked(&e->epoch->epoch_size);
28314 atomic_inc(&e->epoch->active);
28315 spin_unlock(&mdev->epoch_lock);
28316
28317 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
28318 D_ASSERT(list_empty(&mdev->done_ee));
28319
28320 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
28321 - atomic_set(&mdev->current_epoch->epoch_size, 0);
28322 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
28323 D_ASSERT(list_empty(&mdev->current_epoch->list));
28324 }
28325
28326 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
28327 index 1e888c9..05cf1b0 100644
28328 --- a/drivers/block/loop.c
28329 +++ b/drivers/block/loop.c
28330 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
28331 mm_segment_t old_fs = get_fs();
28332
28333 set_fs(get_ds());
28334 - bw = file->f_op->write(file, buf, len, &pos);
28335 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
28336 set_fs(old_fs);
28337 if (likely(bw == len))
28338 return 0;
28339 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
28340 index 4364303..9adf4ee 100644
28341 --- a/drivers/char/Kconfig
28342 +++ b/drivers/char/Kconfig
28343 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
28344
28345 config DEVKMEM
28346 bool "/dev/kmem virtual device support"
28347 - default y
28348 + default n
28349 + depends on !GRKERNSEC_KMEM
28350 help
28351 Say Y here if you want to support the /dev/kmem device. The
28352 /dev/kmem device is rarely used, but can be used for certain
28353 @@ -596,6 +597,7 @@ config DEVPORT
28354 bool
28355 depends on !M68K
28356 depends on ISA || PCI
28357 + depends on !GRKERNSEC_KMEM
28358 default y
28359
28360 source "drivers/s390/char/Kconfig"
28361 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
28362 index 2e04433..22afc64 100644
28363 --- a/drivers/char/agp/frontend.c
28364 +++ b/drivers/char/agp/frontend.c
28365 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
28366 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
28367 return -EFAULT;
28368
28369 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
28370 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
28371 return -EFAULT;
28372
28373 client = agp_find_client_by_pid(reserve.pid);
28374 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
28375 index 095ab90..afad0a4 100644
28376 --- a/drivers/char/briq_panel.c
28377 +++ b/drivers/char/briq_panel.c
28378 @@ -9,6 +9,7 @@
28379 #include <linux/types.h>
28380 #include <linux/errno.h>
28381 #include <linux/tty.h>
28382 +#include <linux/mutex.h>
28383 #include <linux/timer.h>
28384 #include <linux/kernel.h>
28385 #include <linux/wait.h>
28386 @@ -34,6 +35,7 @@ static int vfd_is_open;
28387 static unsigned char vfd[40];
28388 static int vfd_cursor;
28389 static unsigned char ledpb, led;
28390 +static DEFINE_MUTEX(vfd_mutex);
28391
28392 static void update_vfd(void)
28393 {
28394 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28395 if (!vfd_is_open)
28396 return -EBUSY;
28397
28398 + mutex_lock(&vfd_mutex);
28399 for (;;) {
28400 char c;
28401 if (!indx)
28402 break;
28403 - if (get_user(c, buf))
28404 + if (get_user(c, buf)) {
28405 + mutex_unlock(&vfd_mutex);
28406 return -EFAULT;
28407 + }
28408 if (esc) {
28409 set_led(c);
28410 esc = 0;
28411 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28412 buf++;
28413 }
28414 update_vfd();
28415 + mutex_unlock(&vfd_mutex);
28416
28417 return len;
28418 }
28419 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
28420 index f773a9d..65cd683 100644
28421 --- a/drivers/char/genrtc.c
28422 +++ b/drivers/char/genrtc.c
28423 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
28424 switch (cmd) {
28425
28426 case RTC_PLL_GET:
28427 + memset(&pll, 0, sizeof(pll));
28428 if (get_rtc_pll(&pll))
28429 return -EINVAL;
28430 else
28431 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
28432 index 0833896..cccce52 100644
28433 --- a/drivers/char/hpet.c
28434 +++ b/drivers/char/hpet.c
28435 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
28436 }
28437
28438 static int
28439 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
28440 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
28441 struct hpet_info *info)
28442 {
28443 struct hpet_timer __iomem *timer;
28444 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
28445 index 58c0e63..46c16bf 100644
28446 --- a/drivers/char/ipmi/ipmi_msghandler.c
28447 +++ b/drivers/char/ipmi/ipmi_msghandler.c
28448 @@ -415,7 +415,7 @@ struct ipmi_smi {
28449 struct proc_dir_entry *proc_dir;
28450 char proc_dir_name[10];
28451
28452 - atomic_t stats[IPMI_NUM_STATS];
28453 + atomic_unchecked_t stats[IPMI_NUM_STATS];
28454
28455 /*
28456 * run_to_completion duplicate of smb_info, smi_info
28457 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
28458
28459
28460 #define ipmi_inc_stat(intf, stat) \
28461 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
28462 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
28463 #define ipmi_get_stat(intf, stat) \
28464 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
28465 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
28466
28467 static int is_lan_addr(struct ipmi_addr *addr)
28468 {
28469 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
28470 INIT_LIST_HEAD(&intf->cmd_rcvrs);
28471 init_waitqueue_head(&intf->waitq);
28472 for (i = 0; i < IPMI_NUM_STATS; i++)
28473 - atomic_set(&intf->stats[i], 0);
28474 + atomic_set_unchecked(&intf->stats[i], 0);
28475
28476 intf->proc_dir = NULL;
28477
28478 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
28479 index 9397ab4..d01bee1 100644
28480 --- a/drivers/char/ipmi/ipmi_si_intf.c
28481 +++ b/drivers/char/ipmi/ipmi_si_intf.c
28482 @@ -277,7 +277,7 @@ struct smi_info {
28483 unsigned char slave_addr;
28484
28485 /* Counters and things for the proc filesystem. */
28486 - atomic_t stats[SI_NUM_STATS];
28487 + atomic_unchecked_t stats[SI_NUM_STATS];
28488
28489 struct task_struct *thread;
28490
28491 @@ -286,9 +286,9 @@ struct smi_info {
28492 };
28493
28494 #define smi_inc_stat(smi, stat) \
28495 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28496 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28497 #define smi_get_stat(smi, stat) \
28498 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28499 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28500
28501 #define SI_MAX_PARMS 4
28502
28503 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
28504 atomic_set(&new_smi->req_events, 0);
28505 new_smi->run_to_completion = 0;
28506 for (i = 0; i < SI_NUM_STATS; i++)
28507 - atomic_set(&new_smi->stats[i], 0);
28508 + atomic_set_unchecked(&new_smi->stats[i], 0);
28509
28510 new_smi->interrupt_disabled = 1;
28511 atomic_set(&new_smi->stop_operation, 0);
28512 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
28513 index 1aeaaba..e018570 100644
28514 --- a/drivers/char/mbcs.c
28515 +++ b/drivers/char/mbcs.c
28516 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
28517 return 0;
28518 }
28519
28520 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
28521 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
28522 {
28523 .part_num = MBCS_PART_NUM,
28524 .mfg_num = MBCS_MFG_NUM,
28525 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
28526 index 1451790..f705c30 100644
28527 --- a/drivers/char/mem.c
28528 +++ b/drivers/char/mem.c
28529 @@ -18,6 +18,7 @@
28530 #include <linux/raw.h>
28531 #include <linux/tty.h>
28532 #include <linux/capability.h>
28533 +#include <linux/security.h>
28534 #include <linux/ptrace.h>
28535 #include <linux/device.h>
28536 #include <linux/highmem.h>
28537 @@ -35,6 +36,10 @@
28538 # include <linux/efi.h>
28539 #endif
28540
28541 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28542 +extern const struct file_operations grsec_fops;
28543 +#endif
28544 +
28545 static inline unsigned long size_inside_page(unsigned long start,
28546 unsigned long size)
28547 {
28548 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28549
28550 while (cursor < to) {
28551 if (!devmem_is_allowed(pfn)) {
28552 +#ifdef CONFIG_GRKERNSEC_KMEM
28553 + gr_handle_mem_readwrite(from, to);
28554 +#else
28555 printk(KERN_INFO
28556 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28557 current->comm, from, to);
28558 +#endif
28559 return 0;
28560 }
28561 cursor += PAGE_SIZE;
28562 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28563 }
28564 return 1;
28565 }
28566 +#elif defined(CONFIG_GRKERNSEC_KMEM)
28567 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28568 +{
28569 + return 0;
28570 +}
28571 #else
28572 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28573 {
28574 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28575
28576 while (count > 0) {
28577 unsigned long remaining;
28578 + char *temp;
28579
28580 sz = size_inside_page(p, count);
28581
28582 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28583 if (!ptr)
28584 return -EFAULT;
28585
28586 - remaining = copy_to_user(buf, ptr, sz);
28587 +#ifdef CONFIG_PAX_USERCOPY
28588 + temp = kmalloc(sz, GFP_KERNEL);
28589 + if (!temp) {
28590 + unxlate_dev_mem_ptr(p, ptr);
28591 + return -ENOMEM;
28592 + }
28593 + memcpy(temp, ptr, sz);
28594 +#else
28595 + temp = ptr;
28596 +#endif
28597 +
28598 + remaining = copy_to_user(buf, temp, sz);
28599 +
28600 +#ifdef CONFIG_PAX_USERCOPY
28601 + kfree(temp);
28602 +#endif
28603 +
28604 unxlate_dev_mem_ptr(p, ptr);
28605 if (remaining)
28606 return -EFAULT;
28607 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28608 size_t count, loff_t *ppos)
28609 {
28610 unsigned long p = *ppos;
28611 - ssize_t low_count, read, sz;
28612 + ssize_t low_count, read, sz, err = 0;
28613 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28614 - int err = 0;
28615
28616 read = 0;
28617 if (p < (unsigned long) high_memory) {
28618 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28619 }
28620 #endif
28621 while (low_count > 0) {
28622 + char *temp;
28623 +
28624 sz = size_inside_page(p, low_count);
28625
28626 /*
28627 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28628 */
28629 kbuf = xlate_dev_kmem_ptr((char *)p);
28630
28631 - if (copy_to_user(buf, kbuf, sz))
28632 +#ifdef CONFIG_PAX_USERCOPY
28633 + temp = kmalloc(sz, GFP_KERNEL);
28634 + if (!temp)
28635 + return -ENOMEM;
28636 + memcpy(temp, kbuf, sz);
28637 +#else
28638 + temp = kbuf;
28639 +#endif
28640 +
28641 + err = copy_to_user(buf, temp, sz);
28642 +
28643 +#ifdef CONFIG_PAX_USERCOPY
28644 + kfree(temp);
28645 +#endif
28646 +
28647 + if (err)
28648 return -EFAULT;
28649 buf += sz;
28650 p += sz;
28651 @@ -867,6 +914,9 @@ static const struct memdev {
28652 #ifdef CONFIG_CRASH_DUMP
28653 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28654 #endif
28655 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28656 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28657 +#endif
28658 };
28659
28660 static int memory_open(struct inode *inode, struct file *filp)
28661 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
28662 index da3cfee..a5a6606 100644
28663 --- a/drivers/char/nvram.c
28664 +++ b/drivers/char/nvram.c
28665 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
28666
28667 spin_unlock_irq(&rtc_lock);
28668
28669 - if (copy_to_user(buf, contents, tmp - contents))
28670 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
28671 return -EFAULT;
28672
28673 *ppos = i;
28674 diff --git a/drivers/char/random.c b/drivers/char/random.c
28675 index 6035ab8..bdfe4fd 100644
28676 --- a/drivers/char/random.c
28677 +++ b/drivers/char/random.c
28678 @@ -261,8 +261,13 @@
28679 /*
28680 * Configuration information
28681 */
28682 +#ifdef CONFIG_GRKERNSEC_RANDNET
28683 +#define INPUT_POOL_WORDS 512
28684 +#define OUTPUT_POOL_WORDS 128
28685 +#else
28686 #define INPUT_POOL_WORDS 128
28687 #define OUTPUT_POOL_WORDS 32
28688 +#endif
28689 #define SEC_XFER_SIZE 512
28690 #define EXTRACT_SIZE 10
28691
28692 @@ -300,10 +305,17 @@ static struct poolinfo {
28693 int poolwords;
28694 int tap1, tap2, tap3, tap4, tap5;
28695 } poolinfo_table[] = {
28696 +#ifdef CONFIG_GRKERNSEC_RANDNET
28697 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28698 + { 512, 411, 308, 208, 104, 1 },
28699 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28700 + { 128, 103, 76, 51, 25, 1 },
28701 +#else
28702 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28703 { 128, 103, 76, 51, 25, 1 },
28704 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28705 { 32, 26, 20, 14, 7, 1 },
28706 +#endif
28707 #if 0
28708 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28709 { 2048, 1638, 1231, 819, 411, 1 },
28710 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
28711
28712 extract_buf(r, tmp);
28713 i = min_t(int, nbytes, EXTRACT_SIZE);
28714 - if (copy_to_user(buf, tmp, i)) {
28715 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
28716 ret = -EFAULT;
28717 break;
28718 }
28719 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28720 #include <linux/sysctl.h>
28721
28722 static int min_read_thresh = 8, min_write_thresh;
28723 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28724 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28725 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28726 static char sysctl_bootid[16];
28727
28728 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
28729 index 1ee8ce7..b778bef 100644
28730 --- a/drivers/char/sonypi.c
28731 +++ b/drivers/char/sonypi.c
28732 @@ -55,6 +55,7 @@
28733 #include <asm/uaccess.h>
28734 #include <asm/io.h>
28735 #include <asm/system.h>
28736 +#include <asm/local.h>
28737
28738 #include <linux/sonypi.h>
28739
28740 @@ -491,7 +492,7 @@ static struct sonypi_device {
28741 spinlock_t fifo_lock;
28742 wait_queue_head_t fifo_proc_list;
28743 struct fasync_struct *fifo_async;
28744 - int open_count;
28745 + local_t open_count;
28746 int model;
28747 struct input_dev *input_jog_dev;
28748 struct input_dev *input_key_dev;
28749 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
28750 static int sonypi_misc_release(struct inode *inode, struct file *file)
28751 {
28752 mutex_lock(&sonypi_device.lock);
28753 - sonypi_device.open_count--;
28754 + local_dec(&sonypi_device.open_count);
28755 mutex_unlock(&sonypi_device.lock);
28756 return 0;
28757 }
28758 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
28759 {
28760 mutex_lock(&sonypi_device.lock);
28761 /* Flush input queue on first open */
28762 - if (!sonypi_device.open_count)
28763 + if (!local_read(&sonypi_device.open_count))
28764 kfifo_reset(&sonypi_device.fifo);
28765 - sonypi_device.open_count++;
28766 + local_inc(&sonypi_device.open_count);
28767 mutex_unlock(&sonypi_device.lock);
28768
28769 return 0;
28770 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
28771 index 361a1df..2471eee 100644
28772 --- a/drivers/char/tpm/tpm.c
28773 +++ b/drivers/char/tpm/tpm.c
28774 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
28775 chip->vendor.req_complete_val)
28776 goto out_recv;
28777
28778 - if ((status == chip->vendor.req_canceled)) {
28779 + if (status == chip->vendor.req_canceled) {
28780 dev_err(chip->dev, "Operation Canceled\n");
28781 rc = -ECANCELED;
28782 goto out;
28783 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
28784 index 0636520..169c1d0 100644
28785 --- a/drivers/char/tpm/tpm_bios.c
28786 +++ b/drivers/char/tpm/tpm_bios.c
28787 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
28788 event = addr;
28789
28790 if ((event->event_type == 0 && event->event_size == 0) ||
28791 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28792 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28793 return NULL;
28794
28795 return addr;
28796 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
28797 return NULL;
28798
28799 if ((event->event_type == 0 && event->event_size == 0) ||
28800 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28801 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28802 return NULL;
28803
28804 (*pos)++;
28805 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
28806 int i;
28807
28808 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28809 - seq_putc(m, data[i]);
28810 + if (!seq_putc(m, data[i]))
28811 + return -EFAULT;
28812
28813 return 0;
28814 }
28815 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
28816 log->bios_event_log_end = log->bios_event_log + len;
28817
28818 virt = acpi_os_map_memory(start, len);
28819 + if (!virt) {
28820 + kfree(log->bios_event_log);
28821 + log->bios_event_log = NULL;
28822 + return -EFAULT;
28823 + }
28824
28825 - memcpy(log->bios_event_log, virt, len);
28826 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
28827
28828 acpi_os_unmap_memory(virt, len);
28829 return 0;
28830 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
28831 index 8e3c46d..c139b99 100644
28832 --- a/drivers/char/virtio_console.c
28833 +++ b/drivers/char/virtio_console.c
28834 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
28835 if (to_user) {
28836 ssize_t ret;
28837
28838 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
28839 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
28840 if (ret)
28841 return -EFAULT;
28842 } else {
28843 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
28844 if (!port_has_data(port) && !port->host_connected)
28845 return 0;
28846
28847 - return fill_readbuf(port, ubuf, count, true);
28848 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
28849 }
28850
28851 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
28852 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
28853 index eb1d864..39ee5a7 100644
28854 --- a/drivers/dma/dmatest.c
28855 +++ b/drivers/dma/dmatest.c
28856 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
28857 }
28858 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
28859 cnt = dmatest_add_threads(dtc, DMA_PQ);
28860 - thread_count += cnt > 0 ?: 0;
28861 + thread_count += cnt > 0 ? cnt : 0;
28862 }
28863
28864 pr_info("dmatest: Started %u threads using %s\n",
28865 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
28866 index c9eee6d..f9d5280 100644
28867 --- a/drivers/edac/amd64_edac.c
28868 +++ b/drivers/edac/amd64_edac.c
28869 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
28870 * PCI core identifies what devices are on a system during boot, and then
28871 * inquiry this table to see if this driver is for a given device found.
28872 */
28873 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
28874 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
28875 {
28876 .vendor = PCI_VENDOR_ID_AMD,
28877 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
28878 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
28879 index e47e73b..348e0bd 100644
28880 --- a/drivers/edac/amd76x_edac.c
28881 +++ b/drivers/edac/amd76x_edac.c
28882 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
28883 edac_mc_free(mci);
28884 }
28885
28886 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
28887 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
28888 {
28889 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28890 AMD762},
28891 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
28892 index 1af531a..3a8ff27 100644
28893 --- a/drivers/edac/e752x_edac.c
28894 +++ b/drivers/edac/e752x_edac.c
28895 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
28896 edac_mc_free(mci);
28897 }
28898
28899 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
28900 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
28901 {
28902 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28903 E7520},
28904 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
28905 index 6ffb6d2..383d8d7 100644
28906 --- a/drivers/edac/e7xxx_edac.c
28907 +++ b/drivers/edac/e7xxx_edac.c
28908 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
28909 edac_mc_free(mci);
28910 }
28911
28912 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
28913 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
28914 {
28915 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28916 E7205},
28917 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
28918 index 495198a..ac08c85 100644
28919 --- a/drivers/edac/edac_pci_sysfs.c
28920 +++ b/drivers/edac/edac_pci_sysfs.c
28921 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
28922 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28923 static int edac_pci_poll_msec = 1000; /* one second workq period */
28924
28925 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28926 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28927 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28928 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28929
28930 static struct kobject *edac_pci_top_main_kobj;
28931 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28932 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28933 edac_printk(KERN_CRIT, EDAC_PCI,
28934 "Signaled System Error on %s\n",
28935 pci_name(dev));
28936 - atomic_inc(&pci_nonparity_count);
28937 + atomic_inc_unchecked(&pci_nonparity_count);
28938 }
28939
28940 if (status & (PCI_STATUS_PARITY)) {
28941 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28942 "Master Data Parity Error on %s\n",
28943 pci_name(dev));
28944
28945 - atomic_inc(&pci_parity_count);
28946 + atomic_inc_unchecked(&pci_parity_count);
28947 }
28948
28949 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28950 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28951 "Detected Parity Error on %s\n",
28952 pci_name(dev));
28953
28954 - atomic_inc(&pci_parity_count);
28955 + atomic_inc_unchecked(&pci_parity_count);
28956 }
28957 }
28958
28959 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28960 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28961 "Signaled System Error on %s\n",
28962 pci_name(dev));
28963 - atomic_inc(&pci_nonparity_count);
28964 + atomic_inc_unchecked(&pci_nonparity_count);
28965 }
28966
28967 if (status & (PCI_STATUS_PARITY)) {
28968 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28969 "Master Data Parity Error on "
28970 "%s\n", pci_name(dev));
28971
28972 - atomic_inc(&pci_parity_count);
28973 + atomic_inc_unchecked(&pci_parity_count);
28974 }
28975
28976 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28977 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28978 "Detected Parity Error on %s\n",
28979 pci_name(dev));
28980
28981 - atomic_inc(&pci_parity_count);
28982 + atomic_inc_unchecked(&pci_parity_count);
28983 }
28984 }
28985 }
28986 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
28987 if (!check_pci_errors)
28988 return;
28989
28990 - before_count = atomic_read(&pci_parity_count);
28991 + before_count = atomic_read_unchecked(&pci_parity_count);
28992
28993 /* scan all PCI devices looking for a Parity Error on devices and
28994 * bridges.
28995 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
28996 /* Only if operator has selected panic on PCI Error */
28997 if (edac_pci_get_panic_on_pe()) {
28998 /* If the count is different 'after' from 'before' */
28999 - if (before_count != atomic_read(&pci_parity_count))
29000 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29001 panic("EDAC: PCI Parity Error");
29002 }
29003 }
29004 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29005 index c0510b3..6e2a954 100644
29006 --- a/drivers/edac/i3000_edac.c
29007 +++ b/drivers/edac/i3000_edac.c
29008 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29009 edac_mc_free(mci);
29010 }
29011
29012 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29013 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29014 {
29015 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29016 I3000},
29017 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29018 index aa08497..7e6822a 100644
29019 --- a/drivers/edac/i3200_edac.c
29020 +++ b/drivers/edac/i3200_edac.c
29021 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29022 edac_mc_free(mci);
29023 }
29024
29025 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29026 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29027 {
29028 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29029 I3200},
29030 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29031 index 4dc3ac2..67d05a6 100644
29032 --- a/drivers/edac/i5000_edac.c
29033 +++ b/drivers/edac/i5000_edac.c
29034 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29035 *
29036 * The "E500P" device is the first device supported.
29037 */
29038 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29039 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29040 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29041 .driver_data = I5000P},
29042
29043 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29044 index bcbdeec..9886d16 100644
29045 --- a/drivers/edac/i5100_edac.c
29046 +++ b/drivers/edac/i5100_edac.c
29047 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29048 edac_mc_free(mci);
29049 }
29050
29051 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
29052 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
29053 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
29054 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
29055 { 0, }
29056 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
29057 index 74d6ec34..baff517 100644
29058 --- a/drivers/edac/i5400_edac.c
29059 +++ b/drivers/edac/i5400_edac.c
29060 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
29061 *
29062 * The "E500P" device is the first device supported.
29063 */
29064 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
29065 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
29066 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
29067 {0,} /* 0 terminated list. */
29068 };
29069 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
29070 index 6104dba..e7ea8e1 100644
29071 --- a/drivers/edac/i7300_edac.c
29072 +++ b/drivers/edac/i7300_edac.c
29073 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
29074 *
29075 * Has only 8086:360c PCI ID
29076 */
29077 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
29078 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
29079 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
29080 {0,} /* 0 terminated list. */
29081 };
29082 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
29083 index 70ad892..178943c 100644
29084 --- a/drivers/edac/i7core_edac.c
29085 +++ b/drivers/edac/i7core_edac.c
29086 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
29087 /*
29088 * pci_device_id table for which devices we are looking for
29089 */
29090 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
29091 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
29092 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
29093 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
29094 {0,} /* 0 terminated list. */
29095 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
29096 index 4329d39..f3022ef 100644
29097 --- a/drivers/edac/i82443bxgx_edac.c
29098 +++ b/drivers/edac/i82443bxgx_edac.c
29099 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
29100
29101 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
29102
29103 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
29104 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
29105 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
29106 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
29107 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
29108 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
29109 index 931a057..fd28340 100644
29110 --- a/drivers/edac/i82860_edac.c
29111 +++ b/drivers/edac/i82860_edac.c
29112 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
29113 edac_mc_free(mci);
29114 }
29115
29116 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
29117 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
29118 {
29119 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29120 I82860},
29121 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
29122 index 33864c6..01edc61 100644
29123 --- a/drivers/edac/i82875p_edac.c
29124 +++ b/drivers/edac/i82875p_edac.c
29125 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
29126 edac_mc_free(mci);
29127 }
29128
29129 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
29130 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
29131 {
29132 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29133 I82875P},
29134 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
29135 index a5da732..983363b 100644
29136 --- a/drivers/edac/i82975x_edac.c
29137 +++ b/drivers/edac/i82975x_edac.c
29138 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
29139 edac_mc_free(mci);
29140 }
29141
29142 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
29143 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
29144 {
29145 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29146 I82975X
29147 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29148 index 0106747..0b40417 100644
29149 --- a/drivers/edac/mce_amd.h
29150 +++ b/drivers/edac/mce_amd.h
29151 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
29152 bool (*dc_mce)(u16, u8);
29153 bool (*ic_mce)(u16, u8);
29154 bool (*nb_mce)(u16, u8);
29155 -};
29156 +} __no_const;
29157
29158 void amd_report_gart_errors(bool);
29159 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29160 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
29161 index b153674..ad2ba9b 100644
29162 --- a/drivers/edac/r82600_edac.c
29163 +++ b/drivers/edac/r82600_edac.c
29164 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
29165 edac_mc_free(mci);
29166 }
29167
29168 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
29169 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
29170 {
29171 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
29172 },
29173 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
29174 index 7a402bf..af0b211 100644
29175 --- a/drivers/edac/sb_edac.c
29176 +++ b/drivers/edac/sb_edac.c
29177 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
29178 /*
29179 * pci_device_id table for which devices we are looking for
29180 */
29181 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
29182 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
29183 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
29184 {0,} /* 0 terminated list. */
29185 };
29186 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
29187 index b6f47de..c5acf3a 100644
29188 --- a/drivers/edac/x38_edac.c
29189 +++ b/drivers/edac/x38_edac.c
29190 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
29191 edac_mc_free(mci);
29192 }
29193
29194 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
29195 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
29196 {
29197 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29198 X38},
29199 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29200 index 85661b0..c784559a 100644
29201 --- a/drivers/firewire/core-card.c
29202 +++ b/drivers/firewire/core-card.c
29203 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
29204
29205 void fw_core_remove_card(struct fw_card *card)
29206 {
29207 - struct fw_card_driver dummy_driver = dummy_driver_template;
29208 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29209
29210 card->driver->update_phy_reg(card, 4,
29211 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29212 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29213 index 4799393..37bd3ab 100644
29214 --- a/drivers/firewire/core-cdev.c
29215 +++ b/drivers/firewire/core-cdev.c
29216 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
29217 int ret;
29218
29219 if ((request->channels == 0 && request->bandwidth == 0) ||
29220 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29221 - request->bandwidth < 0)
29222 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29223 return -EINVAL;
29224
29225 r = kmalloc(sizeof(*r), GFP_KERNEL);
29226 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29227 index 855ab3f..11f4bbd 100644
29228 --- a/drivers/firewire/core-transaction.c
29229 +++ b/drivers/firewire/core-transaction.c
29230 @@ -37,6 +37,7 @@
29231 #include <linux/timer.h>
29232 #include <linux/types.h>
29233 #include <linux/workqueue.h>
29234 +#include <linux/sched.h>
29235
29236 #include <asm/byteorder.h>
29237
29238 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29239 index b45be57..5fad18b 100644
29240 --- a/drivers/firewire/core.h
29241 +++ b/drivers/firewire/core.h
29242 @@ -101,6 +101,7 @@ struct fw_card_driver {
29243
29244 int (*stop_iso)(struct fw_iso_context *ctx);
29245 };
29246 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29247
29248 void fw_card_initialize(struct fw_card *card,
29249 const struct fw_card_driver *driver, struct device *device);
29250 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29251 index 153980b..4b4d046 100644
29252 --- a/drivers/firmware/dmi_scan.c
29253 +++ b/drivers/firmware/dmi_scan.c
29254 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29255 }
29256 }
29257 else {
29258 - /*
29259 - * no iounmap() for that ioremap(); it would be a no-op, but
29260 - * it's so early in setup that sucker gets confused into doing
29261 - * what it shouldn't if we actually call it.
29262 - */
29263 p = dmi_ioremap(0xF0000, 0x10000);
29264 if (p == NULL)
29265 goto error;
29266 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29267 if (buf == NULL)
29268 return -1;
29269
29270 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29271 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29272
29273 iounmap(buf);
29274 return 0;
29275 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29276 index 98723cb..10ca85b 100644
29277 --- a/drivers/gpio/gpio-vr41xx.c
29278 +++ b/drivers/gpio/gpio-vr41xx.c
29279 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29280 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29281 maskl, pendl, maskh, pendh);
29282
29283 - atomic_inc(&irq_err_count);
29284 + atomic_inc_unchecked(&irq_err_count);
29285
29286 return -EINVAL;
29287 }
29288 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
29289 index 8323fc3..5c1d755 100644
29290 --- a/drivers/gpu/drm/drm_crtc.c
29291 +++ b/drivers/gpu/drm/drm_crtc.c
29292 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
29293 */
29294 if ((out_resp->count_modes >= mode_count) && mode_count) {
29295 copied = 0;
29296 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
29297 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
29298 list_for_each_entry(mode, &connector->modes, head) {
29299 drm_crtc_convert_to_umode(&u_mode, mode);
29300 if (copy_to_user(mode_ptr + copied,
29301 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
29302
29303 if ((out_resp->count_props >= props_count) && props_count) {
29304 copied = 0;
29305 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
29306 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
29307 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
29308 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
29309 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
29310 if (connector->property_ids[i] != 0) {
29311 if (put_user(connector->property_ids[i],
29312 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
29313
29314 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
29315 copied = 0;
29316 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
29317 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
29318 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
29319 if (connector->encoder_ids[i] != 0) {
29320 if (put_user(connector->encoder_ids[i],
29321 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
29322 }
29323
29324 for (i = 0; i < crtc_req->count_connectors; i++) {
29325 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
29326 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
29327 if (get_user(out_id, &set_connectors_ptr[i])) {
29328 ret = -EFAULT;
29329 goto out;
29330 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
29331 fb = obj_to_fb(obj);
29332
29333 num_clips = r->num_clips;
29334 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
29335 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
29336
29337 if (!num_clips != !clips_ptr) {
29338 ret = -EINVAL;
29339 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
29340 out_resp->flags = property->flags;
29341
29342 if ((out_resp->count_values >= value_count) && value_count) {
29343 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
29344 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
29345 for (i = 0; i < value_count; i++) {
29346 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
29347 ret = -EFAULT;
29348 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
29349 if (property->flags & DRM_MODE_PROP_ENUM) {
29350 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
29351 copied = 0;
29352 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
29353 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
29354 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
29355
29356 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
29357 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
29358 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
29359 copied = 0;
29360 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
29361 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
29362 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
29363
29364 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
29365 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
29366 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
29367 struct drm_mode_get_blob *out_resp = data;
29368 struct drm_property_blob *blob;
29369 int ret = 0;
29370 - void *blob_ptr;
29371 + void __user *blob_ptr;
29372
29373 if (!drm_core_check_feature(dev, DRIVER_MODESET))
29374 return -EINVAL;
29375 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
29376 blob = obj_to_blob(obj);
29377
29378 if (out_resp->length == blob->length) {
29379 - blob_ptr = (void *)(unsigned long)out_resp->data;
29380 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
29381 if (copy_to_user(blob_ptr, blob->data, blob->length)){
29382 ret = -EFAULT;
29383 goto done;
29384 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29385 index d2619d7..bd6bd00 100644
29386 --- a/drivers/gpu/drm/drm_crtc_helper.c
29387 +++ b/drivers/gpu/drm/drm_crtc_helper.c
29388 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29389 struct drm_crtc *tmp;
29390 int crtc_mask = 1;
29391
29392 - WARN(!crtc, "checking null crtc?\n");
29393 + BUG_ON(!crtc);
29394
29395 dev = crtc->dev;
29396
29397 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29398 index 40c187c..5746164 100644
29399 --- a/drivers/gpu/drm/drm_drv.c
29400 +++ b/drivers/gpu/drm/drm_drv.c
29401 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
29402 /**
29403 * Copy and IOCTL return string to user space
29404 */
29405 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29406 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29407 {
29408 int len;
29409
29410 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
29411
29412 dev = file_priv->minor->dev;
29413 atomic_inc(&dev->ioctl_count);
29414 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29415 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29416 ++file_priv->ioctl_count;
29417
29418 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29419 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29420 index 828bf65..cdaa0e9 100644
29421 --- a/drivers/gpu/drm/drm_fops.c
29422 +++ b/drivers/gpu/drm/drm_fops.c
29423 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29424 }
29425
29426 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29427 - atomic_set(&dev->counts[i], 0);
29428 + atomic_set_unchecked(&dev->counts[i], 0);
29429
29430 dev->sigdata.lock = NULL;
29431
29432 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
29433
29434 retcode = drm_open_helper(inode, filp, dev);
29435 if (!retcode) {
29436 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29437 - if (!dev->open_count++)
29438 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29439 + if (local_inc_return(&dev->open_count) == 1)
29440 retcode = drm_setup(dev);
29441 }
29442 if (!retcode) {
29443 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
29444
29445 mutex_lock(&drm_global_mutex);
29446
29447 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29448 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29449
29450 if (dev->driver->preclose)
29451 dev->driver->preclose(dev, file_priv);
29452 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
29453 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29454 task_pid_nr(current),
29455 (long)old_encode_dev(file_priv->minor->device),
29456 - dev->open_count);
29457 + local_read(&dev->open_count));
29458
29459 /* Release any auth tokens that might point to this file_priv,
29460 (do that under the drm_global_mutex) */
29461 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
29462 * End inline drm_release
29463 */
29464
29465 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29466 - if (!--dev->open_count) {
29467 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29468 + if (local_dec_and_test(&dev->open_count)) {
29469 if (atomic_read(&dev->ioctl_count)) {
29470 DRM_ERROR("Device busy: %d\n",
29471 atomic_read(&dev->ioctl_count));
29472 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
29473 index c87dc96..326055d 100644
29474 --- a/drivers/gpu/drm/drm_global.c
29475 +++ b/drivers/gpu/drm/drm_global.c
29476 @@ -36,7 +36,7 @@
29477 struct drm_global_item {
29478 struct mutex mutex;
29479 void *object;
29480 - int refcount;
29481 + atomic_t refcount;
29482 };
29483
29484 static struct drm_global_item glob[DRM_GLOBAL_NUM];
29485 @@ -49,7 +49,7 @@ void drm_global_init(void)
29486 struct drm_global_item *item = &glob[i];
29487 mutex_init(&item->mutex);
29488 item->object = NULL;
29489 - item->refcount = 0;
29490 + atomic_set(&item->refcount, 0);
29491 }
29492 }
29493
29494 @@ -59,7 +59,7 @@ void drm_global_release(void)
29495 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
29496 struct drm_global_item *item = &glob[i];
29497 BUG_ON(item->object != NULL);
29498 - BUG_ON(item->refcount != 0);
29499 + BUG_ON(atomic_read(&item->refcount) != 0);
29500 }
29501 }
29502
29503 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29504 void *object;
29505
29506 mutex_lock(&item->mutex);
29507 - if (item->refcount == 0) {
29508 + if (atomic_read(&item->refcount) == 0) {
29509 item->object = kzalloc(ref->size, GFP_KERNEL);
29510 if (unlikely(item->object == NULL)) {
29511 ret = -ENOMEM;
29512 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29513 goto out_err;
29514
29515 }
29516 - ++item->refcount;
29517 + atomic_inc(&item->refcount);
29518 ref->object = item->object;
29519 object = item->object;
29520 mutex_unlock(&item->mutex);
29521 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
29522 struct drm_global_item *item = &glob[ref->global_type];
29523
29524 mutex_lock(&item->mutex);
29525 - BUG_ON(item->refcount == 0);
29526 + BUG_ON(atomic_read(&item->refcount) == 0);
29527 BUG_ON(ref->object != item->object);
29528 - if (--item->refcount == 0) {
29529 + if (atomic_dec_and_test(&item->refcount)) {
29530 ref->release(ref);
29531 item->object = NULL;
29532 }
29533 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
29534 index ab1162d..42587b2 100644
29535 --- a/drivers/gpu/drm/drm_info.c
29536 +++ b/drivers/gpu/drm/drm_info.c
29537 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
29538 struct drm_local_map *map;
29539 struct drm_map_list *r_list;
29540
29541 - /* Hardcoded from _DRM_FRAME_BUFFER,
29542 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29543 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29544 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29545 + static const char * const types[] = {
29546 + [_DRM_FRAME_BUFFER] = "FB",
29547 + [_DRM_REGISTERS] = "REG",
29548 + [_DRM_SHM] = "SHM",
29549 + [_DRM_AGP] = "AGP",
29550 + [_DRM_SCATTER_GATHER] = "SG",
29551 + [_DRM_CONSISTENT] = "PCI",
29552 + [_DRM_GEM] = "GEM" };
29553 const char *type;
29554 int i;
29555
29556 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
29557 map = r_list->map;
29558 if (!map)
29559 continue;
29560 - if (map->type < 0 || map->type > 5)
29561 + if (map->type >= ARRAY_SIZE(types))
29562 type = "??";
29563 else
29564 type = types[map->type];
29565 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
29566 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29567 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29568 vma->vm_flags & VM_IO ? 'i' : '-',
29569 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29570 + 0);
29571 +#else
29572 vma->vm_pgoff);
29573 +#endif
29574
29575 #if defined(__i386__)
29576 pgprot = pgprot_val(vma->vm_page_prot);
29577 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
29578 index ddd70db..40321e6 100644
29579 --- a/drivers/gpu/drm/drm_ioc32.c
29580 +++ b/drivers/gpu/drm/drm_ioc32.c
29581 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
29582 request = compat_alloc_user_space(nbytes);
29583 if (!access_ok(VERIFY_WRITE, request, nbytes))
29584 return -EFAULT;
29585 - list = (struct drm_buf_desc *) (request + 1);
29586 + list = (struct drm_buf_desc __user *) (request + 1);
29587
29588 if (__put_user(count, &request->count)
29589 || __put_user(list, &request->list))
29590 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
29591 request = compat_alloc_user_space(nbytes);
29592 if (!access_ok(VERIFY_WRITE, request, nbytes))
29593 return -EFAULT;
29594 - list = (struct drm_buf_pub *) (request + 1);
29595 + list = (struct drm_buf_pub __user *) (request + 1);
29596
29597 if (__put_user(count, &request->count)
29598 || __put_user(list, &request->list))
29599 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
29600 index 904d7e9..ab88581 100644
29601 --- a/drivers/gpu/drm/drm_ioctl.c
29602 +++ b/drivers/gpu/drm/drm_ioctl.c
29603 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
29604 stats->data[i].value =
29605 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29606 else
29607 - stats->data[i].value = atomic_read(&dev->counts[i]);
29608 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29609 stats->data[i].type = dev->types[i];
29610 }
29611
29612 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
29613 index 632ae24..244cf4a 100644
29614 --- a/drivers/gpu/drm/drm_lock.c
29615 +++ b/drivers/gpu/drm/drm_lock.c
29616 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29617 if (drm_lock_take(&master->lock, lock->context)) {
29618 master->lock.file_priv = file_priv;
29619 master->lock.lock_time = jiffies;
29620 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29621 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29622 break; /* Got lock */
29623 }
29624
29625 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29626 return -EINVAL;
29627 }
29628
29629 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29630 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29631
29632 if (drm_lock_free(&master->lock, lock->context)) {
29633 /* FIXME: Should really bail out here. */
29634 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
29635 index 8f371e8..9f85d52 100644
29636 --- a/drivers/gpu/drm/i810/i810_dma.c
29637 +++ b/drivers/gpu/drm/i810/i810_dma.c
29638 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
29639 dma->buflist[vertex->idx],
29640 vertex->discard, vertex->used);
29641
29642 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29643 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29644 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29645 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29646 sarea_priv->last_enqueue = dev_priv->counter - 1;
29647 sarea_priv->last_dispatch = (int)hw_status[5];
29648
29649 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
29650 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29651 mc->last_render);
29652
29653 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29654 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29655 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29656 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29657 sarea_priv->last_enqueue = dev_priv->counter - 1;
29658 sarea_priv->last_dispatch = (int)hw_status[5];
29659
29660 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
29661 index c9339f4..f5e1b9d 100644
29662 --- a/drivers/gpu/drm/i810/i810_drv.h
29663 +++ b/drivers/gpu/drm/i810/i810_drv.h
29664 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29665 int page_flipping;
29666
29667 wait_queue_head_t irq_queue;
29668 - atomic_t irq_received;
29669 - atomic_t irq_emitted;
29670 + atomic_unchecked_t irq_received;
29671 + atomic_unchecked_t irq_emitted;
29672
29673 int front_offset;
29674 } drm_i810_private_t;
29675 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
29676 index b2e3c97..58cf079 100644
29677 --- a/drivers/gpu/drm/i915/i915_debugfs.c
29678 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
29679 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
29680 I915_READ(GTIMR));
29681 }
29682 seq_printf(m, "Interrupts received: %d\n",
29683 - atomic_read(&dev_priv->irq_received));
29684 + atomic_read_unchecked(&dev_priv->irq_received));
29685 for (i = 0; i < I915_NUM_RINGS; i++) {
29686 if (IS_GEN6(dev) || IS_GEN7(dev)) {
29687 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
29688 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
29689 return ret;
29690
29691 if (opregion->header)
29692 - seq_write(m, opregion->header, OPREGION_SIZE);
29693 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
29694
29695 mutex_unlock(&dev->struct_mutex);
29696
29697 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
29698 index c4da951..3c59c5c 100644
29699 --- a/drivers/gpu/drm/i915/i915_dma.c
29700 +++ b/drivers/gpu/drm/i915/i915_dma.c
29701 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
29702 bool can_switch;
29703
29704 spin_lock(&dev->count_lock);
29705 - can_switch = (dev->open_count == 0);
29706 + can_switch = (local_read(&dev->open_count) == 0);
29707 spin_unlock(&dev->count_lock);
29708 return can_switch;
29709 }
29710 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
29711 index ae294a0..1755461 100644
29712 --- a/drivers/gpu/drm/i915/i915_drv.h
29713 +++ b/drivers/gpu/drm/i915/i915_drv.h
29714 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
29715 /* render clock increase/decrease */
29716 /* display clock increase/decrease */
29717 /* pll clock increase/decrease */
29718 -};
29719 +} __no_const;
29720
29721 struct intel_device_info {
29722 u8 gen;
29723 @@ -318,7 +318,7 @@ typedef struct drm_i915_private {
29724 int current_page;
29725 int page_flipping;
29726
29727 - atomic_t irq_received;
29728 + atomic_unchecked_t irq_received;
29729
29730 /* protects the irq masks */
29731 spinlock_t irq_lock;
29732 @@ -893,7 +893,7 @@ struct drm_i915_gem_object {
29733 * will be page flipped away on the next vblank. When it
29734 * reaches 0, dev_priv->pending_flip_queue will be woken up.
29735 */
29736 - atomic_t pending_flip;
29737 + atomic_unchecked_t pending_flip;
29738 };
29739
29740 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
29741 @@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
29742 extern void intel_teardown_gmbus(struct drm_device *dev);
29743 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
29744 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
29745 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29746 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29747 {
29748 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
29749 }
29750 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29751 index b9da890..cad1d98 100644
29752 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29753 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29754 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
29755 i915_gem_clflush_object(obj);
29756
29757 if (obj->base.pending_write_domain)
29758 - cd->flips |= atomic_read(&obj->pending_flip);
29759 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
29760
29761 /* The actual obj->write_domain will be updated with
29762 * pending_write_domain after we emit the accumulated flush for all
29763 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
29764
29765 static int
29766 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
29767 - int count)
29768 + unsigned int count)
29769 {
29770 - int i;
29771 + unsigned int i;
29772
29773 for (i = 0; i < count; i++) {
29774 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
29775 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
29776 index d47a53b..61154c2 100644
29777 --- a/drivers/gpu/drm/i915/i915_irq.c
29778 +++ b/drivers/gpu/drm/i915/i915_irq.c
29779 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
29780 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
29781 struct drm_i915_master_private *master_priv;
29782
29783 - atomic_inc(&dev_priv->irq_received);
29784 + atomic_inc_unchecked(&dev_priv->irq_received);
29785
29786 /* disable master interrupt before clearing iir */
29787 de_ier = I915_READ(DEIER);
29788 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
29789 struct drm_i915_master_private *master_priv;
29790 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
29791
29792 - atomic_inc(&dev_priv->irq_received);
29793 + atomic_inc_unchecked(&dev_priv->irq_received);
29794
29795 if (IS_GEN6(dev))
29796 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
29797 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
29798 int ret = IRQ_NONE, pipe;
29799 bool blc_event = false;
29800
29801 - atomic_inc(&dev_priv->irq_received);
29802 + atomic_inc_unchecked(&dev_priv->irq_received);
29803
29804 iir = I915_READ(IIR);
29805
29806 @@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
29807 {
29808 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29809
29810 - atomic_set(&dev_priv->irq_received, 0);
29811 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29812
29813 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29814 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29815 @@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
29816 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29817 int pipe;
29818
29819 - atomic_set(&dev_priv->irq_received, 0);
29820 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29821
29822 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29823 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29824 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
29825 index 9ec9755..6d1cf2d 100644
29826 --- a/drivers/gpu/drm/i915/intel_display.c
29827 +++ b/drivers/gpu/drm/i915/intel_display.c
29828 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
29829
29830 wait_event(dev_priv->pending_flip_queue,
29831 atomic_read(&dev_priv->mm.wedged) ||
29832 - atomic_read(&obj->pending_flip) == 0);
29833 + atomic_read_unchecked(&obj->pending_flip) == 0);
29834
29835 /* Big Hammer, we also need to ensure that any pending
29836 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
29837 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
29838 obj = to_intel_framebuffer(crtc->fb)->obj;
29839 dev_priv = crtc->dev->dev_private;
29840 wait_event(dev_priv->pending_flip_queue,
29841 - atomic_read(&obj->pending_flip) == 0);
29842 + atomic_read_unchecked(&obj->pending_flip) == 0);
29843 }
29844
29845 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
29846 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
29847
29848 atomic_clear_mask(1 << intel_crtc->plane,
29849 &obj->pending_flip.counter);
29850 - if (atomic_read(&obj->pending_flip) == 0)
29851 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
29852 wake_up(&dev_priv->pending_flip_queue);
29853
29854 schedule_work(&work->work);
29855 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29856 /* Block clients from rendering to the new back buffer until
29857 * the flip occurs and the object is no longer visible.
29858 */
29859 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29860 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29861
29862 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
29863 if (ret)
29864 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29865 return 0;
29866
29867 cleanup_pending:
29868 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29869 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29870 drm_gem_object_unreference(&work->old_fb_obj->base);
29871 drm_gem_object_unreference(&obj->base);
29872 mutex_unlock(&dev->struct_mutex);
29873 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
29874 index 54558a0..2d97005 100644
29875 --- a/drivers/gpu/drm/mga/mga_drv.h
29876 +++ b/drivers/gpu/drm/mga/mga_drv.h
29877 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29878 u32 clear_cmd;
29879 u32 maccess;
29880
29881 - atomic_t vbl_received; /**< Number of vblanks received. */
29882 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29883 wait_queue_head_t fence_queue;
29884 - atomic_t last_fence_retired;
29885 + atomic_unchecked_t last_fence_retired;
29886 u32 next_fence_to_post;
29887
29888 unsigned int fb_cpp;
29889 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
29890 index 2581202..f230a8d9 100644
29891 --- a/drivers/gpu/drm/mga/mga_irq.c
29892 +++ b/drivers/gpu/drm/mga/mga_irq.c
29893 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
29894 if (crtc != 0)
29895 return 0;
29896
29897 - return atomic_read(&dev_priv->vbl_received);
29898 + return atomic_read_unchecked(&dev_priv->vbl_received);
29899 }
29900
29901
29902 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29903 /* VBLANK interrupt */
29904 if (status & MGA_VLINEPEN) {
29905 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29906 - atomic_inc(&dev_priv->vbl_received);
29907 + atomic_inc_unchecked(&dev_priv->vbl_received);
29908 drm_handle_vblank(dev, 0);
29909 handled = 1;
29910 }
29911 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29912 if ((prim_start & ~0x03) != (prim_end & ~0x03))
29913 MGA_WRITE(MGA_PRIMEND, prim_end);
29914
29915 - atomic_inc(&dev_priv->last_fence_retired);
29916 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29917 DRM_WAKEUP(&dev_priv->fence_queue);
29918 handled = 1;
29919 }
29920 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
29921 * using fences.
29922 */
29923 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29924 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29925 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29926 - *sequence) <= (1 << 23)));
29927
29928 *sequence = cur_fence;
29929 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
29930 index 5fc201b..7b032b9 100644
29931 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
29932 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
29933 @@ -201,7 +201,7 @@ struct methods {
29934 const char desc[8];
29935 void (*loadbios)(struct drm_device *, uint8_t *);
29936 const bool rw;
29937 -};
29938 +} __do_const;
29939
29940 static struct methods shadow_methods[] = {
29941 { "PRAMIN", load_vbios_pramin, true },
29942 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
29943 struct bit_table {
29944 const char id;
29945 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
29946 -};
29947 +} __no_const;
29948
29949 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
29950
29951 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
29952 index 4c0be3a..5757582 100644
29953 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
29954 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
29955 @@ -238,7 +238,7 @@ struct nouveau_channel {
29956 struct list_head pending;
29957 uint32_t sequence;
29958 uint32_t sequence_ack;
29959 - atomic_t last_sequence_irq;
29960 + atomic_unchecked_t last_sequence_irq;
29961 struct nouveau_vma vma;
29962 } fence;
29963
29964 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
29965 u32 handle, u16 class);
29966 void (*set_tile_region)(struct drm_device *dev, int i);
29967 void (*tlb_flush)(struct drm_device *, int engine);
29968 -};
29969 +} __no_const;
29970
29971 struct nouveau_instmem_engine {
29972 void *priv;
29973 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
29974 struct nouveau_mc_engine {
29975 int (*init)(struct drm_device *dev);
29976 void (*takedown)(struct drm_device *dev);
29977 -};
29978 +} __no_const;
29979
29980 struct nouveau_timer_engine {
29981 int (*init)(struct drm_device *dev);
29982 void (*takedown)(struct drm_device *dev);
29983 uint64_t (*read)(struct drm_device *dev);
29984 -};
29985 +} __no_const;
29986
29987 struct nouveau_fb_engine {
29988 int num_tiles;
29989 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
29990 void (*put)(struct drm_device *, struct nouveau_mem **);
29991
29992 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
29993 -};
29994 +} __no_const;
29995
29996 struct nouveau_engine {
29997 struct nouveau_instmem_engine instmem;
29998 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
29999 struct drm_global_reference mem_global_ref;
30000 struct ttm_bo_global_ref bo_global_ref;
30001 struct ttm_bo_device bdev;
30002 - atomic_t validate_sequence;
30003 + atomic_unchecked_t validate_sequence;
30004 } ttm;
30005
30006 struct {
30007 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30008 index 2f6daae..c9d7b9e 100644
30009 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30010 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30011 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30012 if (USE_REFCNT(dev))
30013 sequence = nvchan_rd32(chan, 0x48);
30014 else
30015 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30016 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30017
30018 if (chan->fence.sequence_ack == sequence)
30019 goto out;
30020 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30021 return ret;
30022 }
30023
30024 - atomic_set(&chan->fence.last_sequence_irq, 0);
30025 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30026 return 0;
30027 }
30028
30029 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30030 index 7ce3fde..cb3ea04 100644
30031 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30032 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30033 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30034 int trycnt = 0;
30035 int ret, i;
30036
30037 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30038 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30039 retry:
30040 if (++trycnt > 100000) {
30041 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30042 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30043 index d8831ab..0ba8356 100644
30044 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30045 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30046 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30047 bool can_switch;
30048
30049 spin_lock(&dev->count_lock);
30050 - can_switch = (dev->open_count == 0);
30051 + can_switch = (local_read(&dev->open_count) == 0);
30052 spin_unlock(&dev->count_lock);
30053 return can_switch;
30054 }
30055 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30056 index dbdea8e..cd6eeeb 100644
30057 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30058 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30059 @@ -554,7 +554,7 @@ static int
30060 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30061 u32 class, u32 mthd, u32 data)
30062 {
30063 - atomic_set(&chan->fence.last_sequence_irq, data);
30064 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30065 return 0;
30066 }
30067
30068 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30069 index bcac90b..53bfc76 100644
30070 --- a/drivers/gpu/drm/r128/r128_cce.c
30071 +++ b/drivers/gpu/drm/r128/r128_cce.c
30072 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30073
30074 /* GH: Simple idle check.
30075 */
30076 - atomic_set(&dev_priv->idle_count, 0);
30077 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30078
30079 /* We don't support anything other than bus-mastering ring mode,
30080 * but the ring can be in either AGP or PCI space for the ring
30081 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30082 index 930c71b..499aded 100644
30083 --- a/drivers/gpu/drm/r128/r128_drv.h
30084 +++ b/drivers/gpu/drm/r128/r128_drv.h
30085 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30086 int is_pci;
30087 unsigned long cce_buffers_offset;
30088
30089 - atomic_t idle_count;
30090 + atomic_unchecked_t idle_count;
30091
30092 int page_flipping;
30093 int current_page;
30094 u32 crtc_offset;
30095 u32 crtc_offset_cntl;
30096
30097 - atomic_t vbl_received;
30098 + atomic_unchecked_t vbl_received;
30099
30100 u32 color_fmt;
30101 unsigned int front_offset;
30102 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30103 index 429d5a0..7e899ed 100644
30104 --- a/drivers/gpu/drm/r128/r128_irq.c
30105 +++ b/drivers/gpu/drm/r128/r128_irq.c
30106 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30107 if (crtc != 0)
30108 return 0;
30109
30110 - return atomic_read(&dev_priv->vbl_received);
30111 + return atomic_read_unchecked(&dev_priv->vbl_received);
30112 }
30113
30114 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30115 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30116 /* VBLANK interrupt */
30117 if (status & R128_CRTC_VBLANK_INT) {
30118 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30119 - atomic_inc(&dev_priv->vbl_received);
30120 + atomic_inc_unchecked(&dev_priv->vbl_received);
30121 drm_handle_vblank(dev, 0);
30122 return IRQ_HANDLED;
30123 }
30124 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30125 index a9e33ce..09edd4b 100644
30126 --- a/drivers/gpu/drm/r128/r128_state.c
30127 +++ b/drivers/gpu/drm/r128/r128_state.c
30128 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30129
30130 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30131 {
30132 - if (atomic_read(&dev_priv->idle_count) == 0)
30133 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30134 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30135 else
30136 - atomic_set(&dev_priv->idle_count, 0);
30137 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30138 }
30139
30140 #endif
30141 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30142 index 5a82b6b..9e69c73 100644
30143 --- a/drivers/gpu/drm/radeon/mkregtable.c
30144 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30145 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30146 regex_t mask_rex;
30147 regmatch_t match[4];
30148 char buf[1024];
30149 - size_t end;
30150 + long end;
30151 int len;
30152 int done = 0;
30153 int r;
30154 unsigned o;
30155 struct offset *offset;
30156 char last_reg_s[10];
30157 - int last_reg;
30158 + unsigned long last_reg;
30159
30160 if (regcomp
30161 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30162 diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
30163 index cb1acff..8861bc5 100644
30164 --- a/drivers/gpu/drm/radeon/r600_cs.c
30165 +++ b/drivers/gpu/drm/radeon/r600_cs.c
30166 @@ -1304,6 +1304,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
30167 h0 = G_038004_TEX_HEIGHT(word1) + 1;
30168 d0 = G_038004_TEX_DEPTH(word1);
30169 nfaces = 1;
30170 + array = 0;
30171 switch (G_038000_DIM(word0)) {
30172 case V_038000_SQ_TEX_DIM_1D:
30173 case V_038000_SQ_TEX_DIM_2D:
30174 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30175 index 8227e76..ce0b195 100644
30176 --- a/drivers/gpu/drm/radeon/radeon.h
30177 +++ b/drivers/gpu/drm/radeon/radeon.h
30178 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
30179 */
30180 struct radeon_fence_driver {
30181 uint32_t scratch_reg;
30182 - atomic_t seq;
30183 + atomic_unchecked_t seq;
30184 uint32_t last_seq;
30185 unsigned long last_jiffies;
30186 unsigned long last_timeout;
30187 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
30188 int x2, int y2);
30189 void (*draw_auto)(struct radeon_device *rdev);
30190 void (*set_default_state)(struct radeon_device *rdev);
30191 -};
30192 +} __no_const;
30193
30194 struct r600_blit {
30195 struct mutex mutex;
30196 @@ -954,7 +954,7 @@ struct radeon_asic {
30197 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
30198 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30199 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30200 -};
30201 +} __no_const;
30202
30203 /*
30204 * Asic structures
30205 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30206 index 9231564..78b00fd 100644
30207 --- a/drivers/gpu/drm/radeon/radeon_device.c
30208 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30209 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30210 bool can_switch;
30211
30212 spin_lock(&dev->count_lock);
30213 - can_switch = (dev->open_count == 0);
30214 + can_switch = (local_read(&dev->open_count) == 0);
30215 spin_unlock(&dev->count_lock);
30216 return can_switch;
30217 }
30218 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30219 index a1b59ca..86f2d44 100644
30220 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30221 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30222 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30223
30224 /* SW interrupt */
30225 wait_queue_head_t swi_queue;
30226 - atomic_t swi_emitted;
30227 + atomic_unchecked_t swi_emitted;
30228 int vblank_crtc;
30229 uint32_t irq_enable_reg;
30230 uint32_t r500_disp_irq_reg;
30231 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30232 index 76ec0e9..6feb1a3 100644
30233 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30234 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30235 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30236 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
30237 return 0;
30238 }
30239 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
30240 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
30241 if (!rdev->cp.ready)
30242 /* FIXME: cp is not running assume everythings is done right
30243 * away
30244 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
30245 return r;
30246 }
30247 radeon_fence_write(rdev, 0);
30248 - atomic_set(&rdev->fence_drv.seq, 0);
30249 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
30250 INIT_LIST_HEAD(&rdev->fence_drv.created);
30251 INIT_LIST_HEAD(&rdev->fence_drv.emited);
30252 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
30253 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30254 index 48b7cea..342236f 100644
30255 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30256 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30257 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30258 request = compat_alloc_user_space(sizeof(*request));
30259 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30260 || __put_user(req32.param, &request->param)
30261 - || __put_user((void __user *)(unsigned long)req32.value,
30262 + || __put_user((unsigned long)req32.value,
30263 &request->value))
30264 return -EFAULT;
30265
30266 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30267 index 00da384..32f972d 100644
30268 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30269 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30270 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30271 unsigned int ret;
30272 RING_LOCALS;
30273
30274 - atomic_inc(&dev_priv->swi_emitted);
30275 - ret = atomic_read(&dev_priv->swi_emitted);
30276 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30277 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30278
30279 BEGIN_RING(4);
30280 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30281 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30282 drm_radeon_private_t *dev_priv =
30283 (drm_radeon_private_t *) dev->dev_private;
30284
30285 - atomic_set(&dev_priv->swi_emitted, 0);
30286 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30287 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30288
30289 dev->max_vblank_count = 0x001fffff;
30290 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30291 index e8422ae..d22d4a8 100644
30292 --- a/drivers/gpu/drm/radeon/radeon_state.c
30293 +++ b/drivers/gpu/drm/radeon/radeon_state.c
30294 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30295 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30296 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30297
30298 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30299 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30300 sarea_priv->nbox * sizeof(depth_boxes[0])))
30301 return -EFAULT;
30302
30303 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30304 {
30305 drm_radeon_private_t *dev_priv = dev->dev_private;
30306 drm_radeon_getparam_t *param = data;
30307 - int value;
30308 + int value = 0;
30309
30310 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30311
30312 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30313 index 0b5468b..9c4b308 100644
30314 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30315 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30316 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30317 }
30318 if (unlikely(ttm_vm_ops == NULL)) {
30319 ttm_vm_ops = vma->vm_ops;
30320 - radeon_ttm_vm_ops = *ttm_vm_ops;
30321 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30322 + pax_open_kernel();
30323 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30324 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30325 + pax_close_kernel();
30326 }
30327 vma->vm_ops = &radeon_ttm_vm_ops;
30328 return 0;
30329 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30330 index a9049ed..501f284 100644
30331 --- a/drivers/gpu/drm/radeon/rs690.c
30332 +++ b/drivers/gpu/drm/radeon/rs690.c
30333 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30334 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30335 rdev->pm.sideport_bandwidth.full)
30336 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30337 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30338 + read_delay_latency.full = dfixed_const(800 * 1000);
30339 read_delay_latency.full = dfixed_div(read_delay_latency,
30340 rdev->pm.igp_sideport_mclk);
30341 + a.full = dfixed_const(370);
30342 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30343 } else {
30344 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30345 rdev->pm.k8_bandwidth.full)
30346 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30347 index 727e93d..1565650 100644
30348 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30349 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30350 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
30351 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30352 struct shrink_control *sc)
30353 {
30354 - static atomic_t start_pool = ATOMIC_INIT(0);
30355 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30356 unsigned i;
30357 - unsigned pool_offset = atomic_add_return(1, &start_pool);
30358 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30359 struct ttm_page_pool *pool;
30360 int shrink_pages = sc->nr_to_scan;
30361
30362 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30363 index 9cf87d9..2000b7d 100644
30364 --- a/drivers/gpu/drm/via/via_drv.h
30365 +++ b/drivers/gpu/drm/via/via_drv.h
30366 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30367 typedef uint32_t maskarray_t[5];
30368
30369 typedef struct drm_via_irq {
30370 - atomic_t irq_received;
30371 + atomic_unchecked_t irq_received;
30372 uint32_t pending_mask;
30373 uint32_t enable_mask;
30374 wait_queue_head_t irq_queue;
30375 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30376 struct timeval last_vblank;
30377 int last_vblank_valid;
30378 unsigned usec_per_vblank;
30379 - atomic_t vbl_received;
30380 + atomic_unchecked_t vbl_received;
30381 drm_via_state_t hc_state;
30382 char pci_buf[VIA_PCI_BUF_SIZE];
30383 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30384 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30385 index d391f48..10c8ca3 100644
30386 --- a/drivers/gpu/drm/via/via_irq.c
30387 +++ b/drivers/gpu/drm/via/via_irq.c
30388 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30389 if (crtc != 0)
30390 return 0;
30391
30392 - return atomic_read(&dev_priv->vbl_received);
30393 + return atomic_read_unchecked(&dev_priv->vbl_received);
30394 }
30395
30396 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30397 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30398
30399 status = VIA_READ(VIA_REG_INTERRUPT);
30400 if (status & VIA_IRQ_VBLANK_PENDING) {
30401 - atomic_inc(&dev_priv->vbl_received);
30402 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30403 + atomic_inc_unchecked(&dev_priv->vbl_received);
30404 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30405 do_gettimeofday(&cur_vblank);
30406 if (dev_priv->last_vblank_valid) {
30407 dev_priv->usec_per_vblank =
30408 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30409 dev_priv->last_vblank = cur_vblank;
30410 dev_priv->last_vblank_valid = 1;
30411 }
30412 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30413 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30414 DRM_DEBUG("US per vblank is: %u\n",
30415 dev_priv->usec_per_vblank);
30416 }
30417 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30418
30419 for (i = 0; i < dev_priv->num_irqs; ++i) {
30420 if (status & cur_irq->pending_mask) {
30421 - atomic_inc(&cur_irq->irq_received);
30422 + atomic_inc_unchecked(&cur_irq->irq_received);
30423 DRM_WAKEUP(&cur_irq->irq_queue);
30424 handled = 1;
30425 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30426 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30427 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30428 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30429 masks[irq][4]));
30430 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30431 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30432 } else {
30433 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30434 (((cur_irq_sequence =
30435 - atomic_read(&cur_irq->irq_received)) -
30436 + atomic_read_unchecked(&cur_irq->irq_received)) -
30437 *sequence) <= (1 << 23)));
30438 }
30439 *sequence = cur_irq_sequence;
30440 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
30441 }
30442
30443 for (i = 0; i < dev_priv->num_irqs; ++i) {
30444 - atomic_set(&cur_irq->irq_received, 0);
30445 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30446 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30447 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30448 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30449 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
30450 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30451 case VIA_IRQ_RELATIVE:
30452 irqwait->request.sequence +=
30453 - atomic_read(&cur_irq->irq_received);
30454 + atomic_read_unchecked(&cur_irq->irq_received);
30455 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30456 case VIA_IRQ_ABSOLUTE:
30457 break;
30458 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30459 index dc27970..f18b008 100644
30460 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30461 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30462 @@ -260,7 +260,7 @@ struct vmw_private {
30463 * Fencing and IRQs.
30464 */
30465
30466 - atomic_t marker_seq;
30467 + atomic_unchecked_t marker_seq;
30468 wait_queue_head_t fence_queue;
30469 wait_queue_head_t fifo_queue;
30470 int fence_queue_waiters; /* Protected by hw_mutex */
30471 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30472 index a0c2f12..68ae6cb 100644
30473 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30474 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30475 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
30476 (unsigned int) min,
30477 (unsigned int) fifo->capabilities);
30478
30479 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30480 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30481 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
30482 vmw_marker_queue_init(&fifo->marker_queue);
30483 return vmw_fifo_send_fence(dev_priv, &dummy);
30484 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
30485 if (reserveable)
30486 iowrite32(bytes, fifo_mem +
30487 SVGA_FIFO_RESERVED);
30488 - return fifo_mem + (next_cmd >> 2);
30489 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
30490 } else {
30491 need_bounce = true;
30492 }
30493 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30494
30495 fm = vmw_fifo_reserve(dev_priv, bytes);
30496 if (unlikely(fm == NULL)) {
30497 - *seqno = atomic_read(&dev_priv->marker_seq);
30498 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30499 ret = -ENOMEM;
30500 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
30501 false, 3*HZ);
30502 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30503 }
30504
30505 do {
30506 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
30507 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
30508 } while (*seqno == 0);
30509
30510 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
30511 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30512 index cabc95f..14b3d77 100644
30513 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30514 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30515 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
30516 * emitted. Then the fence is stale and signaled.
30517 */
30518
30519 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
30520 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
30521 > VMW_FENCE_WRAP);
30522
30523 return ret;
30524 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
30525
30526 if (fifo_idle)
30527 down_read(&fifo_state->rwsem);
30528 - signal_seq = atomic_read(&dev_priv->marker_seq);
30529 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
30530 ret = 0;
30531
30532 for (;;) {
30533 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30534 index 8a8725c..afed796 100644
30535 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30536 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30537 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
30538 while (!vmw_lag_lt(queue, us)) {
30539 spin_lock(&queue->lock);
30540 if (list_empty(&queue->head))
30541 - seqno = atomic_read(&dev_priv->marker_seq);
30542 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30543 else {
30544 marker = list_first_entry(&queue->head,
30545 struct vmw_marker, head);
30546 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
30547 index bb656d8..4169fca 100644
30548 --- a/drivers/hid/hid-core.c
30549 +++ b/drivers/hid/hid-core.c
30550 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
30551
30552 int hid_add_device(struct hid_device *hdev)
30553 {
30554 - static atomic_t id = ATOMIC_INIT(0);
30555 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30556 int ret;
30557
30558 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30559 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
30560 /* XXX hack, any other cleaner solution after the driver core
30561 * is converted to allow more than 20 bytes as the device name? */
30562 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30563 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30564 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30565
30566 hid_debug_register(hdev, dev_name(&hdev->dev));
30567 ret = device_add(&hdev->dev);
30568 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
30569 index 4ef02b2..8a96831 100644
30570 --- a/drivers/hid/usbhid/hiddev.c
30571 +++ b/drivers/hid/usbhid/hiddev.c
30572 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
30573 break;
30574
30575 case HIDIOCAPPLICATION:
30576 - if (arg < 0 || arg >= hid->maxapplication)
30577 + if (arg >= hid->maxapplication)
30578 break;
30579
30580 for (i = 0; i < hid->maxcollection; i++)
30581 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
30582 index 4065374..10ed7dc 100644
30583 --- a/drivers/hv/channel.c
30584 +++ b/drivers/hv/channel.c
30585 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
30586 int ret = 0;
30587 int t;
30588
30589 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
30590 - atomic_inc(&vmbus_connection.next_gpadl_handle);
30591 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
30592 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
30593
30594 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
30595 if (ret)
30596 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
30597 index 0fb100e..baf87e5 100644
30598 --- a/drivers/hv/hv.c
30599 +++ b/drivers/hv/hv.c
30600 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
30601 u64 output_address = (output) ? virt_to_phys(output) : 0;
30602 u32 output_address_hi = output_address >> 32;
30603 u32 output_address_lo = output_address & 0xFFFFFFFF;
30604 - void *hypercall_page = hv_context.hypercall_page;
30605 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
30606
30607 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
30608 "=a"(hv_status_lo) : "d" (control_hi),
30609 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
30610 index 0aee112..b72d21f 100644
30611 --- a/drivers/hv/hyperv_vmbus.h
30612 +++ b/drivers/hv/hyperv_vmbus.h
30613 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
30614 struct vmbus_connection {
30615 enum vmbus_connect_state conn_state;
30616
30617 - atomic_t next_gpadl_handle;
30618 + atomic_unchecked_t next_gpadl_handle;
30619
30620 /*
30621 * Represents channel interrupts. Each bit position represents a
30622 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
30623 index d2d0a2a..90b8f4d 100644
30624 --- a/drivers/hv/vmbus_drv.c
30625 +++ b/drivers/hv/vmbus_drv.c
30626 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
30627 {
30628 int ret = 0;
30629
30630 - static atomic_t device_num = ATOMIC_INIT(0);
30631 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
30632
30633 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
30634 - atomic_inc_return(&device_num));
30635 + atomic_inc_return_unchecked(&device_num));
30636
30637 child_device_obj->device.bus = &hv_bus;
30638 child_device_obj->device.parent = &hv_acpi_dev->dev;
30639 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
30640 index 66f6729..2d6de0a 100644
30641 --- a/drivers/hwmon/acpi_power_meter.c
30642 +++ b/drivers/hwmon/acpi_power_meter.c
30643 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
30644 return res;
30645
30646 temp /= 1000;
30647 - if (temp < 0)
30648 - return -EINVAL;
30649
30650 mutex_lock(&resource->lock);
30651 resource->trip[attr->index - 7] = temp;
30652 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
30653 index 5357925..6cf0418 100644
30654 --- a/drivers/hwmon/sht15.c
30655 +++ b/drivers/hwmon/sht15.c
30656 @@ -166,7 +166,7 @@ struct sht15_data {
30657 int supply_uV;
30658 bool supply_uV_valid;
30659 struct work_struct update_supply_work;
30660 - atomic_t interrupt_handled;
30661 + atomic_unchecked_t interrupt_handled;
30662 };
30663
30664 /**
30665 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
30666 return ret;
30667
30668 gpio_direction_input(data->pdata->gpio_data);
30669 - atomic_set(&data->interrupt_handled, 0);
30670 + atomic_set_unchecked(&data->interrupt_handled, 0);
30671
30672 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30673 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30674 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30675 /* Only relevant if the interrupt hasn't occurred. */
30676 - if (!atomic_read(&data->interrupt_handled))
30677 + if (!atomic_read_unchecked(&data->interrupt_handled))
30678 schedule_work(&data->read_work);
30679 }
30680 ret = wait_event_timeout(data->wait_queue,
30681 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
30682
30683 /* First disable the interrupt */
30684 disable_irq_nosync(irq);
30685 - atomic_inc(&data->interrupt_handled);
30686 + atomic_inc_unchecked(&data->interrupt_handled);
30687 /* Then schedule a reading work struct */
30688 if (data->state != SHT15_READING_NOTHING)
30689 schedule_work(&data->read_work);
30690 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
30691 * If not, then start the interrupt again - care here as could
30692 * have gone low in meantime so verify it hasn't!
30693 */
30694 - atomic_set(&data->interrupt_handled, 0);
30695 + atomic_set_unchecked(&data->interrupt_handled, 0);
30696 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30697 /* If still not occurred or another handler has been scheduled */
30698 if (gpio_get_value(data->pdata->gpio_data)
30699 - || atomic_read(&data->interrupt_handled))
30700 + || atomic_read_unchecked(&data->interrupt_handled))
30701 return;
30702 }
30703
30704 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
30705 index 378fcb5..5e91fa8 100644
30706 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
30707 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
30708 @@ -43,7 +43,7 @@
30709 extern struct i2c_adapter amd756_smbus;
30710
30711 static struct i2c_adapter *s4882_adapter;
30712 -static struct i2c_algorithm *s4882_algo;
30713 +static i2c_algorithm_no_const *s4882_algo;
30714
30715 /* Wrapper access functions for multiplexed SMBus */
30716 static DEFINE_MUTEX(amd756_lock);
30717 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
30718 index 29015eb..af2d8e9 100644
30719 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
30720 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
30721 @@ -41,7 +41,7 @@
30722 extern struct i2c_adapter *nforce2_smbus;
30723
30724 static struct i2c_adapter *s4985_adapter;
30725 -static struct i2c_algorithm *s4985_algo;
30726 +static i2c_algorithm_no_const *s4985_algo;
30727
30728 /* Wrapper access functions for multiplexed SMBus */
30729 static DEFINE_MUTEX(nforce2_lock);
30730 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
30731 index d7a4833..7fae376 100644
30732 --- a/drivers/i2c/i2c-mux.c
30733 +++ b/drivers/i2c/i2c-mux.c
30734 @@ -28,7 +28,7 @@
30735 /* multiplexer per channel data */
30736 struct i2c_mux_priv {
30737 struct i2c_adapter adap;
30738 - struct i2c_algorithm algo;
30739 + i2c_algorithm_no_const algo;
30740
30741 struct i2c_adapter *parent;
30742 void *mux_dev; /* the mux chip/device */
30743 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
30744 index 57d00ca..0145194 100644
30745 --- a/drivers/ide/aec62xx.c
30746 +++ b/drivers/ide/aec62xx.c
30747 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
30748 .cable_detect = atp86x_cable_detect,
30749 };
30750
30751 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
30752 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
30753 { /* 0: AEC6210 */
30754 .name = DRV_NAME,
30755 .init_chipset = init_chipset_aec62xx,
30756 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
30757 index 2c8016a..911a27c 100644
30758 --- a/drivers/ide/alim15x3.c
30759 +++ b/drivers/ide/alim15x3.c
30760 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
30761 .dma_sff_read_status = ide_dma_sff_read_status,
30762 };
30763
30764 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
30765 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
30766 .name = DRV_NAME,
30767 .init_chipset = init_chipset_ali15x3,
30768 .init_hwif = init_hwif_ali15x3,
30769 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
30770 index 3747b25..56fc995 100644
30771 --- a/drivers/ide/amd74xx.c
30772 +++ b/drivers/ide/amd74xx.c
30773 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
30774 .udma_mask = udma, \
30775 }
30776
30777 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
30778 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
30779 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
30780 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
30781 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
30782 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
30783 index 15f0ead..cb43480 100644
30784 --- a/drivers/ide/atiixp.c
30785 +++ b/drivers/ide/atiixp.c
30786 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
30787 .cable_detect = atiixp_cable_detect,
30788 };
30789
30790 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
30791 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
30792 { /* 0: IXP200/300/400/700 */
30793 .name = DRV_NAME,
30794 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
30795 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
30796 index 5f80312..d1fc438 100644
30797 --- a/drivers/ide/cmd64x.c
30798 +++ b/drivers/ide/cmd64x.c
30799 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
30800 .dma_sff_read_status = ide_dma_sff_read_status,
30801 };
30802
30803 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
30804 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
30805 { /* 0: CMD643 */
30806 .name = DRV_NAME,
30807 .init_chipset = init_chipset_cmd64x,
30808 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
30809 index 2c1e5f7..1444762 100644
30810 --- a/drivers/ide/cs5520.c
30811 +++ b/drivers/ide/cs5520.c
30812 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
30813 .set_dma_mode = cs5520_set_dma_mode,
30814 };
30815
30816 -static const struct ide_port_info cyrix_chipset __devinitdata = {
30817 +static const struct ide_port_info cyrix_chipset __devinitconst = {
30818 .name = DRV_NAME,
30819 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
30820 .port_ops = &cs5520_port_ops,
30821 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
30822 index 4dc4eb9..49b40ad 100644
30823 --- a/drivers/ide/cs5530.c
30824 +++ b/drivers/ide/cs5530.c
30825 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
30826 .udma_filter = cs5530_udma_filter,
30827 };
30828
30829 -static const struct ide_port_info cs5530_chipset __devinitdata = {
30830 +static const struct ide_port_info cs5530_chipset __devinitconst = {
30831 .name = DRV_NAME,
30832 .init_chipset = init_chipset_cs5530,
30833 .init_hwif = init_hwif_cs5530,
30834 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
30835 index 5059faf..18d4c85 100644
30836 --- a/drivers/ide/cs5535.c
30837 +++ b/drivers/ide/cs5535.c
30838 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
30839 .cable_detect = cs5535_cable_detect,
30840 };
30841
30842 -static const struct ide_port_info cs5535_chipset __devinitdata = {
30843 +static const struct ide_port_info cs5535_chipset __devinitconst = {
30844 .name = DRV_NAME,
30845 .port_ops = &cs5535_port_ops,
30846 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
30847 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
30848 index 847553f..3ffb49d 100644
30849 --- a/drivers/ide/cy82c693.c
30850 +++ b/drivers/ide/cy82c693.c
30851 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
30852 .set_dma_mode = cy82c693_set_dma_mode,
30853 };
30854
30855 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
30856 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
30857 .name = DRV_NAME,
30858 .init_iops = init_iops_cy82c693,
30859 .port_ops = &cy82c693_port_ops,
30860 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
30861 index 58c51cd..4aec3b8 100644
30862 --- a/drivers/ide/hpt366.c
30863 +++ b/drivers/ide/hpt366.c
30864 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
30865 }
30866 };
30867
30868 -static const struct hpt_info hpt36x __devinitdata = {
30869 +static const struct hpt_info hpt36x __devinitconst = {
30870 .chip_name = "HPT36x",
30871 .chip_type = HPT36x,
30872 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
30873 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
30874 .timings = &hpt36x_timings
30875 };
30876
30877 -static const struct hpt_info hpt370 __devinitdata = {
30878 +static const struct hpt_info hpt370 __devinitconst = {
30879 .chip_name = "HPT370",
30880 .chip_type = HPT370,
30881 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30882 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
30883 .timings = &hpt37x_timings
30884 };
30885
30886 -static const struct hpt_info hpt370a __devinitdata = {
30887 +static const struct hpt_info hpt370a __devinitconst = {
30888 .chip_name = "HPT370A",
30889 .chip_type = HPT370A,
30890 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30891 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
30892 .timings = &hpt37x_timings
30893 };
30894
30895 -static const struct hpt_info hpt374 __devinitdata = {
30896 +static const struct hpt_info hpt374 __devinitconst = {
30897 .chip_name = "HPT374",
30898 .chip_type = HPT374,
30899 .udma_mask = ATA_UDMA5,
30900 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
30901 .timings = &hpt37x_timings
30902 };
30903
30904 -static const struct hpt_info hpt372 __devinitdata = {
30905 +static const struct hpt_info hpt372 __devinitconst = {
30906 .chip_name = "HPT372",
30907 .chip_type = HPT372,
30908 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30909 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
30910 .timings = &hpt37x_timings
30911 };
30912
30913 -static const struct hpt_info hpt372a __devinitdata = {
30914 +static const struct hpt_info hpt372a __devinitconst = {
30915 .chip_name = "HPT372A",
30916 .chip_type = HPT372A,
30917 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30918 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
30919 .timings = &hpt37x_timings
30920 };
30921
30922 -static const struct hpt_info hpt302 __devinitdata = {
30923 +static const struct hpt_info hpt302 __devinitconst = {
30924 .chip_name = "HPT302",
30925 .chip_type = HPT302,
30926 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30927 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
30928 .timings = &hpt37x_timings
30929 };
30930
30931 -static const struct hpt_info hpt371 __devinitdata = {
30932 +static const struct hpt_info hpt371 __devinitconst = {
30933 .chip_name = "HPT371",
30934 .chip_type = HPT371,
30935 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30936 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
30937 .timings = &hpt37x_timings
30938 };
30939
30940 -static const struct hpt_info hpt372n __devinitdata = {
30941 +static const struct hpt_info hpt372n __devinitconst = {
30942 .chip_name = "HPT372N",
30943 .chip_type = HPT372N,
30944 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30945 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
30946 .timings = &hpt37x_timings
30947 };
30948
30949 -static const struct hpt_info hpt302n __devinitdata = {
30950 +static const struct hpt_info hpt302n __devinitconst = {
30951 .chip_name = "HPT302N",
30952 .chip_type = HPT302N,
30953 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30954 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
30955 .timings = &hpt37x_timings
30956 };
30957
30958 -static const struct hpt_info hpt371n __devinitdata = {
30959 +static const struct hpt_info hpt371n __devinitconst = {
30960 .chip_name = "HPT371N",
30961 .chip_type = HPT371N,
30962 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30963 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
30964 .dma_sff_read_status = ide_dma_sff_read_status,
30965 };
30966
30967 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
30968 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
30969 { /* 0: HPT36x */
30970 .name = DRV_NAME,
30971 .init_chipset = init_chipset_hpt366,
30972 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
30973 index 8126824..55a2798 100644
30974 --- a/drivers/ide/ide-cd.c
30975 +++ b/drivers/ide/ide-cd.c
30976 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
30977 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30978 if ((unsigned long)buf & alignment
30979 || blk_rq_bytes(rq) & q->dma_pad_mask
30980 - || object_is_on_stack(buf))
30981 + || object_starts_on_stack(buf))
30982 drive->dma = 0;
30983 }
30984 }
30985 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
30986 index a743e68..1cfd674 100644
30987 --- a/drivers/ide/ide-pci-generic.c
30988 +++ b/drivers/ide/ide-pci-generic.c
30989 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
30990 .udma_mask = ATA_UDMA6, \
30991 }
30992
30993 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
30994 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
30995 /* 0: Unknown */
30996 DECLARE_GENERIC_PCI_DEV(0),
30997
30998 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
30999 index 560e66d..d5dd180 100644
31000 --- a/drivers/ide/it8172.c
31001 +++ b/drivers/ide/it8172.c
31002 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31003 .set_dma_mode = it8172_set_dma_mode,
31004 };
31005
31006 -static const struct ide_port_info it8172_port_info __devinitdata = {
31007 +static const struct ide_port_info it8172_port_info __devinitconst = {
31008 .name = DRV_NAME,
31009 .port_ops = &it8172_port_ops,
31010 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31011 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31012 index 46816ba..1847aeb 100644
31013 --- a/drivers/ide/it8213.c
31014 +++ b/drivers/ide/it8213.c
31015 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31016 .cable_detect = it8213_cable_detect,
31017 };
31018
31019 -static const struct ide_port_info it8213_chipset __devinitdata = {
31020 +static const struct ide_port_info it8213_chipset __devinitconst = {
31021 .name = DRV_NAME,
31022 .enablebits = { {0x41, 0x80, 0x80} },
31023 .port_ops = &it8213_port_ops,
31024 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31025 index 2e3169f..c5611db 100644
31026 --- a/drivers/ide/it821x.c
31027 +++ b/drivers/ide/it821x.c
31028 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31029 .cable_detect = it821x_cable_detect,
31030 };
31031
31032 -static const struct ide_port_info it821x_chipset __devinitdata = {
31033 +static const struct ide_port_info it821x_chipset __devinitconst = {
31034 .name = DRV_NAME,
31035 .init_chipset = init_chipset_it821x,
31036 .init_hwif = init_hwif_it821x,
31037 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31038 index 74c2c4a..efddd7d 100644
31039 --- a/drivers/ide/jmicron.c
31040 +++ b/drivers/ide/jmicron.c
31041 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31042 .cable_detect = jmicron_cable_detect,
31043 };
31044
31045 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31046 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31047 .name = DRV_NAME,
31048 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31049 .port_ops = &jmicron_port_ops,
31050 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31051 index 95327a2..73f78d8 100644
31052 --- a/drivers/ide/ns87415.c
31053 +++ b/drivers/ide/ns87415.c
31054 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31055 .dma_sff_read_status = superio_dma_sff_read_status,
31056 };
31057
31058 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31059 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31060 .name = DRV_NAME,
31061 .init_hwif = init_hwif_ns87415,
31062 .tp_ops = &ns87415_tp_ops,
31063 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31064 index 1a53a4c..39edc66 100644
31065 --- a/drivers/ide/opti621.c
31066 +++ b/drivers/ide/opti621.c
31067 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31068 .set_pio_mode = opti621_set_pio_mode,
31069 };
31070
31071 -static const struct ide_port_info opti621_chipset __devinitdata = {
31072 +static const struct ide_port_info opti621_chipset __devinitconst = {
31073 .name = DRV_NAME,
31074 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31075 .port_ops = &opti621_port_ops,
31076 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31077 index 9546fe2..2e5ceb6 100644
31078 --- a/drivers/ide/pdc202xx_new.c
31079 +++ b/drivers/ide/pdc202xx_new.c
31080 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31081 .udma_mask = udma, \
31082 }
31083
31084 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31085 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31086 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31087 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31088 };
31089 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31090 index 3a35ec6..5634510 100644
31091 --- a/drivers/ide/pdc202xx_old.c
31092 +++ b/drivers/ide/pdc202xx_old.c
31093 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31094 .max_sectors = sectors, \
31095 }
31096
31097 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31098 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31099 { /* 0: PDC20246 */
31100 .name = DRV_NAME,
31101 .init_chipset = init_chipset_pdc202xx,
31102 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31103 index 1892e81..fe0fd60 100644
31104 --- a/drivers/ide/piix.c
31105 +++ b/drivers/ide/piix.c
31106 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31107 .udma_mask = udma, \
31108 }
31109
31110 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31111 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31112 /* 0: MPIIX */
31113 { /*
31114 * MPIIX actually has only a single IDE channel mapped to
31115 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31116 index a6414a8..c04173e 100644
31117 --- a/drivers/ide/rz1000.c
31118 +++ b/drivers/ide/rz1000.c
31119 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31120 }
31121 }
31122
31123 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31124 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31125 .name = DRV_NAME,
31126 .host_flags = IDE_HFLAG_NO_DMA,
31127 };
31128 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31129 index 356b9b5..d4758eb 100644
31130 --- a/drivers/ide/sc1200.c
31131 +++ b/drivers/ide/sc1200.c
31132 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31133 .dma_sff_read_status = ide_dma_sff_read_status,
31134 };
31135
31136 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31137 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31138 .name = DRV_NAME,
31139 .port_ops = &sc1200_port_ops,
31140 .dma_ops = &sc1200_dma_ops,
31141 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31142 index b7f5b0c..9701038 100644
31143 --- a/drivers/ide/scc_pata.c
31144 +++ b/drivers/ide/scc_pata.c
31145 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31146 .dma_sff_read_status = scc_dma_sff_read_status,
31147 };
31148
31149 -static const struct ide_port_info scc_chipset __devinitdata = {
31150 +static const struct ide_port_info scc_chipset __devinitconst = {
31151 .name = "sccIDE",
31152 .init_iops = init_iops_scc,
31153 .init_dma = scc_init_dma,
31154 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31155 index 35fb8da..24d72ef 100644
31156 --- a/drivers/ide/serverworks.c
31157 +++ b/drivers/ide/serverworks.c
31158 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31159 .cable_detect = svwks_cable_detect,
31160 };
31161
31162 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31163 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31164 { /* 0: OSB4 */
31165 .name = DRV_NAME,
31166 .init_chipset = init_chipset_svwks,
31167 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31168 index ddeda44..46f7e30 100644
31169 --- a/drivers/ide/siimage.c
31170 +++ b/drivers/ide/siimage.c
31171 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31172 .udma_mask = ATA_UDMA6, \
31173 }
31174
31175 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31176 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31177 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31178 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31179 };
31180 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31181 index 4a00225..09e61b4 100644
31182 --- a/drivers/ide/sis5513.c
31183 +++ b/drivers/ide/sis5513.c
31184 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31185 .cable_detect = sis_cable_detect,
31186 };
31187
31188 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31189 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31190 .name = DRV_NAME,
31191 .init_chipset = init_chipset_sis5513,
31192 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31193 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31194 index f21dc2a..d051cd2 100644
31195 --- a/drivers/ide/sl82c105.c
31196 +++ b/drivers/ide/sl82c105.c
31197 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31198 .dma_sff_read_status = ide_dma_sff_read_status,
31199 };
31200
31201 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31202 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31203 .name = DRV_NAME,
31204 .init_chipset = init_chipset_sl82c105,
31205 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31206 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31207 index 864ffe0..863a5e9 100644
31208 --- a/drivers/ide/slc90e66.c
31209 +++ b/drivers/ide/slc90e66.c
31210 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31211 .cable_detect = slc90e66_cable_detect,
31212 };
31213
31214 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31215 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31216 .name = DRV_NAME,
31217 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31218 .port_ops = &slc90e66_port_ops,
31219 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31220 index 4799d5c..1794678 100644
31221 --- a/drivers/ide/tc86c001.c
31222 +++ b/drivers/ide/tc86c001.c
31223 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31224 .dma_sff_read_status = ide_dma_sff_read_status,
31225 };
31226
31227 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31228 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31229 .name = DRV_NAME,
31230 .init_hwif = init_hwif_tc86c001,
31231 .port_ops = &tc86c001_port_ops,
31232 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31233 index 281c914..55ce1b8 100644
31234 --- a/drivers/ide/triflex.c
31235 +++ b/drivers/ide/triflex.c
31236 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31237 .set_dma_mode = triflex_set_mode,
31238 };
31239
31240 -static const struct ide_port_info triflex_device __devinitdata = {
31241 +static const struct ide_port_info triflex_device __devinitconst = {
31242 .name = DRV_NAME,
31243 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31244 .port_ops = &triflex_port_ops,
31245 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31246 index 4b42ca0..e494a98 100644
31247 --- a/drivers/ide/trm290.c
31248 +++ b/drivers/ide/trm290.c
31249 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31250 .dma_check = trm290_dma_check,
31251 };
31252
31253 -static const struct ide_port_info trm290_chipset __devinitdata = {
31254 +static const struct ide_port_info trm290_chipset __devinitconst = {
31255 .name = DRV_NAME,
31256 .init_hwif = init_hwif_trm290,
31257 .tp_ops = &trm290_tp_ops,
31258 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31259 index f46f49c..eb77678 100644
31260 --- a/drivers/ide/via82cxxx.c
31261 +++ b/drivers/ide/via82cxxx.c
31262 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31263 .cable_detect = via82cxxx_cable_detect,
31264 };
31265
31266 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31267 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31268 .name = DRV_NAME,
31269 .init_chipset = init_chipset_via82cxxx,
31270 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31271 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31272 index eb0e2cc..14241c7 100644
31273 --- a/drivers/ieee802154/fakehard.c
31274 +++ b/drivers/ieee802154/fakehard.c
31275 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31276 phy->transmit_power = 0xbf;
31277
31278 dev->netdev_ops = &fake_ops;
31279 - dev->ml_priv = &fake_mlme;
31280 + dev->ml_priv = (void *)&fake_mlme;
31281
31282 priv = netdev_priv(dev);
31283 priv->phy = phy;
31284 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31285 index 8b72f39..55df4c8 100644
31286 --- a/drivers/infiniband/core/cm.c
31287 +++ b/drivers/infiniband/core/cm.c
31288 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31289
31290 struct cm_counter_group {
31291 struct kobject obj;
31292 - atomic_long_t counter[CM_ATTR_COUNT];
31293 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31294 };
31295
31296 struct cm_counter_attribute {
31297 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31298 struct ib_mad_send_buf *msg = NULL;
31299 int ret;
31300
31301 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31302 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31303 counter[CM_REQ_COUNTER]);
31304
31305 /* Quick state check to discard duplicate REQs. */
31306 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31307 if (!cm_id_priv)
31308 return;
31309
31310 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31311 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31312 counter[CM_REP_COUNTER]);
31313 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31314 if (ret)
31315 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31316 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31317 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31318 spin_unlock_irq(&cm_id_priv->lock);
31319 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31320 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31321 counter[CM_RTU_COUNTER]);
31322 goto out;
31323 }
31324 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31325 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31326 dreq_msg->local_comm_id);
31327 if (!cm_id_priv) {
31328 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31329 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31330 counter[CM_DREQ_COUNTER]);
31331 cm_issue_drep(work->port, work->mad_recv_wc);
31332 return -EINVAL;
31333 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31334 case IB_CM_MRA_REP_RCVD:
31335 break;
31336 case IB_CM_TIMEWAIT:
31337 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31338 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31339 counter[CM_DREQ_COUNTER]);
31340 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31341 goto unlock;
31342 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31343 cm_free_msg(msg);
31344 goto deref;
31345 case IB_CM_DREQ_RCVD:
31346 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31347 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31348 counter[CM_DREQ_COUNTER]);
31349 goto unlock;
31350 default:
31351 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31352 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31353 cm_id_priv->msg, timeout)) {
31354 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31355 - atomic_long_inc(&work->port->
31356 + atomic_long_inc_unchecked(&work->port->
31357 counter_group[CM_RECV_DUPLICATES].
31358 counter[CM_MRA_COUNTER]);
31359 goto out;
31360 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31361 break;
31362 case IB_CM_MRA_REQ_RCVD:
31363 case IB_CM_MRA_REP_RCVD:
31364 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31365 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31366 counter[CM_MRA_COUNTER]);
31367 /* fall through */
31368 default:
31369 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31370 case IB_CM_LAP_IDLE:
31371 break;
31372 case IB_CM_MRA_LAP_SENT:
31373 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31374 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31375 counter[CM_LAP_COUNTER]);
31376 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31377 goto unlock;
31378 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31379 cm_free_msg(msg);
31380 goto deref;
31381 case IB_CM_LAP_RCVD:
31382 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31383 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31384 counter[CM_LAP_COUNTER]);
31385 goto unlock;
31386 default:
31387 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31388 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31389 if (cur_cm_id_priv) {
31390 spin_unlock_irq(&cm.lock);
31391 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31392 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31393 counter[CM_SIDR_REQ_COUNTER]);
31394 goto out; /* Duplicate message. */
31395 }
31396 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31397 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31398 msg->retries = 1;
31399
31400 - atomic_long_add(1 + msg->retries,
31401 + atomic_long_add_unchecked(1 + msg->retries,
31402 &port->counter_group[CM_XMIT].counter[attr_index]);
31403 if (msg->retries)
31404 - atomic_long_add(msg->retries,
31405 + atomic_long_add_unchecked(msg->retries,
31406 &port->counter_group[CM_XMIT_RETRIES].
31407 counter[attr_index]);
31408
31409 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31410 }
31411
31412 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31413 - atomic_long_inc(&port->counter_group[CM_RECV].
31414 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31415 counter[attr_id - CM_ATTR_ID_OFFSET]);
31416
31417 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31418 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31419 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31420
31421 return sprintf(buf, "%ld\n",
31422 - atomic_long_read(&group->counter[cm_attr->index]));
31423 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31424 }
31425
31426 static const struct sysfs_ops cm_counter_ops = {
31427 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
31428 index 176c8f9..2627b62 100644
31429 --- a/drivers/infiniband/core/fmr_pool.c
31430 +++ b/drivers/infiniband/core/fmr_pool.c
31431 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
31432
31433 struct task_struct *thread;
31434
31435 - atomic_t req_ser;
31436 - atomic_t flush_ser;
31437 + atomic_unchecked_t req_ser;
31438 + atomic_unchecked_t flush_ser;
31439
31440 wait_queue_head_t force_wait;
31441 };
31442 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31443 struct ib_fmr_pool *pool = pool_ptr;
31444
31445 do {
31446 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
31447 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
31448 ib_fmr_batch_release(pool);
31449
31450 - atomic_inc(&pool->flush_ser);
31451 + atomic_inc_unchecked(&pool->flush_ser);
31452 wake_up_interruptible(&pool->force_wait);
31453
31454 if (pool->flush_function)
31455 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31456 }
31457
31458 set_current_state(TASK_INTERRUPTIBLE);
31459 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
31460 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
31461 !kthread_should_stop())
31462 schedule();
31463 __set_current_state(TASK_RUNNING);
31464 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
31465 pool->dirty_watermark = params->dirty_watermark;
31466 pool->dirty_len = 0;
31467 spin_lock_init(&pool->pool_lock);
31468 - atomic_set(&pool->req_ser, 0);
31469 - atomic_set(&pool->flush_ser, 0);
31470 + atomic_set_unchecked(&pool->req_ser, 0);
31471 + atomic_set_unchecked(&pool->flush_ser, 0);
31472 init_waitqueue_head(&pool->force_wait);
31473
31474 pool->thread = kthread_run(ib_fmr_cleanup_thread,
31475 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
31476 }
31477 spin_unlock_irq(&pool->pool_lock);
31478
31479 - serial = atomic_inc_return(&pool->req_ser);
31480 + serial = atomic_inc_return_unchecked(&pool->req_ser);
31481 wake_up_process(pool->thread);
31482
31483 if (wait_event_interruptible(pool->force_wait,
31484 - atomic_read(&pool->flush_ser) - serial >= 0))
31485 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
31486 return -EINTR;
31487
31488 return 0;
31489 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
31490 } else {
31491 list_add_tail(&fmr->list, &pool->dirty_list);
31492 if (++pool->dirty_len >= pool->dirty_watermark) {
31493 - atomic_inc(&pool->req_ser);
31494 + atomic_inc_unchecked(&pool->req_ser);
31495 wake_up_process(pool->thread);
31496 }
31497 }
31498 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
31499 index 40c8353..946b0e4 100644
31500 --- a/drivers/infiniband/hw/cxgb4/mem.c
31501 +++ b/drivers/infiniband/hw/cxgb4/mem.c
31502 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31503 int err;
31504 struct fw_ri_tpte tpt;
31505 u32 stag_idx;
31506 - static atomic_t key;
31507 + static atomic_unchecked_t key;
31508
31509 if (c4iw_fatal_error(rdev))
31510 return -EIO;
31511 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31512 &rdev->resource.tpt_fifo_lock);
31513 if (!stag_idx)
31514 return -ENOMEM;
31515 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
31516 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
31517 }
31518 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
31519 __func__, stag_state, type, pdid, stag_idx);
31520 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
31521 index 79b3dbc..96e5fcc 100644
31522 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
31523 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
31524 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31525 struct ib_atomic_eth *ateth;
31526 struct ipath_ack_entry *e;
31527 u64 vaddr;
31528 - atomic64_t *maddr;
31529 + atomic64_unchecked_t *maddr;
31530 u64 sdata;
31531 u32 rkey;
31532 u8 next;
31533 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31534 IB_ACCESS_REMOTE_ATOMIC)))
31535 goto nack_acc_unlck;
31536 /* Perform atomic OP and save result. */
31537 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31538 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31539 sdata = be64_to_cpu(ateth->swap_data);
31540 e = &qp->s_ack_queue[qp->r_head_ack_queue];
31541 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
31542 - (u64) atomic64_add_return(sdata, maddr) - sdata :
31543 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31544 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31545 be64_to_cpu(ateth->compare_data),
31546 sdata);
31547 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
31548 index 1f95bba..9530f87 100644
31549 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
31550 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
31551 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
31552 unsigned long flags;
31553 struct ib_wc wc;
31554 u64 sdata;
31555 - atomic64_t *maddr;
31556 + atomic64_unchecked_t *maddr;
31557 enum ib_wc_status send_status;
31558
31559 /*
31560 @@ -382,11 +382,11 @@ again:
31561 IB_ACCESS_REMOTE_ATOMIC)))
31562 goto acc_err;
31563 /* Perform atomic OP and save result. */
31564 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31565 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31566 sdata = wqe->wr.wr.atomic.compare_add;
31567 *(u64 *) sqp->s_sge.sge.vaddr =
31568 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
31569 - (u64) atomic64_add_return(sdata, maddr) - sdata :
31570 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31571 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31572 sdata, wqe->wr.wr.atomic.swap);
31573 goto send_comp;
31574 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
31575 index 5965b3d..16817fb 100644
31576 --- a/drivers/infiniband/hw/nes/nes.c
31577 +++ b/drivers/infiniband/hw/nes/nes.c
31578 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
31579 LIST_HEAD(nes_adapter_list);
31580 static LIST_HEAD(nes_dev_list);
31581
31582 -atomic_t qps_destroyed;
31583 +atomic_unchecked_t qps_destroyed;
31584
31585 static unsigned int ee_flsh_adapter;
31586 static unsigned int sysfs_nonidx_addr;
31587 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
31588 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
31589 struct nes_adapter *nesadapter = nesdev->nesadapter;
31590
31591 - atomic_inc(&qps_destroyed);
31592 + atomic_inc_unchecked(&qps_destroyed);
31593
31594 /* Free the control structures */
31595
31596 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
31597 index 568b4f1..5ea3eff 100644
31598 --- a/drivers/infiniband/hw/nes/nes.h
31599 +++ b/drivers/infiniband/hw/nes/nes.h
31600 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
31601 extern unsigned int wqm_quanta;
31602 extern struct list_head nes_adapter_list;
31603
31604 -extern atomic_t cm_connects;
31605 -extern atomic_t cm_accepts;
31606 -extern atomic_t cm_disconnects;
31607 -extern atomic_t cm_closes;
31608 -extern atomic_t cm_connecteds;
31609 -extern atomic_t cm_connect_reqs;
31610 -extern atomic_t cm_rejects;
31611 -extern atomic_t mod_qp_timouts;
31612 -extern atomic_t qps_created;
31613 -extern atomic_t qps_destroyed;
31614 -extern atomic_t sw_qps_destroyed;
31615 +extern atomic_unchecked_t cm_connects;
31616 +extern atomic_unchecked_t cm_accepts;
31617 +extern atomic_unchecked_t cm_disconnects;
31618 +extern atomic_unchecked_t cm_closes;
31619 +extern atomic_unchecked_t cm_connecteds;
31620 +extern atomic_unchecked_t cm_connect_reqs;
31621 +extern atomic_unchecked_t cm_rejects;
31622 +extern atomic_unchecked_t mod_qp_timouts;
31623 +extern atomic_unchecked_t qps_created;
31624 +extern atomic_unchecked_t qps_destroyed;
31625 +extern atomic_unchecked_t sw_qps_destroyed;
31626 extern u32 mh_detected;
31627 extern u32 mh_pauses_sent;
31628 extern u32 cm_packets_sent;
31629 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
31630 extern u32 cm_packets_received;
31631 extern u32 cm_packets_dropped;
31632 extern u32 cm_packets_retrans;
31633 -extern atomic_t cm_listens_created;
31634 -extern atomic_t cm_listens_destroyed;
31635 +extern atomic_unchecked_t cm_listens_created;
31636 +extern atomic_unchecked_t cm_listens_destroyed;
31637 extern u32 cm_backlog_drops;
31638 -extern atomic_t cm_loopbacks;
31639 -extern atomic_t cm_nodes_created;
31640 -extern atomic_t cm_nodes_destroyed;
31641 -extern atomic_t cm_accel_dropped_pkts;
31642 -extern atomic_t cm_resets_recvd;
31643 -extern atomic_t pau_qps_created;
31644 -extern atomic_t pau_qps_destroyed;
31645 +extern atomic_unchecked_t cm_loopbacks;
31646 +extern atomic_unchecked_t cm_nodes_created;
31647 +extern atomic_unchecked_t cm_nodes_destroyed;
31648 +extern atomic_unchecked_t cm_accel_dropped_pkts;
31649 +extern atomic_unchecked_t cm_resets_recvd;
31650 +extern atomic_unchecked_t pau_qps_created;
31651 +extern atomic_unchecked_t pau_qps_destroyed;
31652
31653 extern u32 int_mod_timer_init;
31654 extern u32 int_mod_cq_depth_256;
31655 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
31656 index 0a52d72..0642f36 100644
31657 --- a/drivers/infiniband/hw/nes/nes_cm.c
31658 +++ b/drivers/infiniband/hw/nes/nes_cm.c
31659 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
31660 u32 cm_packets_retrans;
31661 u32 cm_packets_created;
31662 u32 cm_packets_received;
31663 -atomic_t cm_listens_created;
31664 -atomic_t cm_listens_destroyed;
31665 +atomic_unchecked_t cm_listens_created;
31666 +atomic_unchecked_t cm_listens_destroyed;
31667 u32 cm_backlog_drops;
31668 -atomic_t cm_loopbacks;
31669 -atomic_t cm_nodes_created;
31670 -atomic_t cm_nodes_destroyed;
31671 -atomic_t cm_accel_dropped_pkts;
31672 -atomic_t cm_resets_recvd;
31673 +atomic_unchecked_t cm_loopbacks;
31674 +atomic_unchecked_t cm_nodes_created;
31675 +atomic_unchecked_t cm_nodes_destroyed;
31676 +atomic_unchecked_t cm_accel_dropped_pkts;
31677 +atomic_unchecked_t cm_resets_recvd;
31678
31679 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
31680 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
31681 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
31682
31683 static struct nes_cm_core *g_cm_core;
31684
31685 -atomic_t cm_connects;
31686 -atomic_t cm_accepts;
31687 -atomic_t cm_disconnects;
31688 -atomic_t cm_closes;
31689 -atomic_t cm_connecteds;
31690 -atomic_t cm_connect_reqs;
31691 -atomic_t cm_rejects;
31692 +atomic_unchecked_t cm_connects;
31693 +atomic_unchecked_t cm_accepts;
31694 +atomic_unchecked_t cm_disconnects;
31695 +atomic_unchecked_t cm_closes;
31696 +atomic_unchecked_t cm_connecteds;
31697 +atomic_unchecked_t cm_connect_reqs;
31698 +atomic_unchecked_t cm_rejects;
31699
31700 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
31701 {
31702 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
31703 kfree(listener);
31704 listener = NULL;
31705 ret = 0;
31706 - atomic_inc(&cm_listens_destroyed);
31707 + atomic_inc_unchecked(&cm_listens_destroyed);
31708 } else {
31709 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
31710 }
31711 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
31712 cm_node->rem_mac);
31713
31714 add_hte_node(cm_core, cm_node);
31715 - atomic_inc(&cm_nodes_created);
31716 + atomic_inc_unchecked(&cm_nodes_created);
31717
31718 return cm_node;
31719 }
31720 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
31721 }
31722
31723 atomic_dec(&cm_core->node_cnt);
31724 - atomic_inc(&cm_nodes_destroyed);
31725 + atomic_inc_unchecked(&cm_nodes_destroyed);
31726 nesqp = cm_node->nesqp;
31727 if (nesqp) {
31728 nesqp->cm_node = NULL;
31729 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
31730
31731 static void drop_packet(struct sk_buff *skb)
31732 {
31733 - atomic_inc(&cm_accel_dropped_pkts);
31734 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31735 dev_kfree_skb_any(skb);
31736 }
31737
31738 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
31739 {
31740
31741 int reset = 0; /* whether to send reset in case of err.. */
31742 - atomic_inc(&cm_resets_recvd);
31743 + atomic_inc_unchecked(&cm_resets_recvd);
31744 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
31745 " refcnt=%d\n", cm_node, cm_node->state,
31746 atomic_read(&cm_node->ref_count));
31747 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
31748 rem_ref_cm_node(cm_node->cm_core, cm_node);
31749 return NULL;
31750 }
31751 - atomic_inc(&cm_loopbacks);
31752 + atomic_inc_unchecked(&cm_loopbacks);
31753 loopbackremotenode->loopbackpartner = cm_node;
31754 loopbackremotenode->tcp_cntxt.rcv_wscale =
31755 NES_CM_DEFAULT_RCV_WND_SCALE;
31756 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
31757 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
31758 else {
31759 rem_ref_cm_node(cm_core, cm_node);
31760 - atomic_inc(&cm_accel_dropped_pkts);
31761 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31762 dev_kfree_skb_any(skb);
31763 }
31764 break;
31765 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31766
31767 if ((cm_id) && (cm_id->event_handler)) {
31768 if (issue_disconn) {
31769 - atomic_inc(&cm_disconnects);
31770 + atomic_inc_unchecked(&cm_disconnects);
31771 cm_event.event = IW_CM_EVENT_DISCONNECT;
31772 cm_event.status = disconn_status;
31773 cm_event.local_addr = cm_id->local_addr;
31774 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31775 }
31776
31777 if (issue_close) {
31778 - atomic_inc(&cm_closes);
31779 + atomic_inc_unchecked(&cm_closes);
31780 nes_disconnect(nesqp, 1);
31781
31782 cm_id->provider_data = nesqp;
31783 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31784
31785 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31786 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31787 - atomic_inc(&cm_accepts);
31788 + atomic_inc_unchecked(&cm_accepts);
31789
31790 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31791 netdev_refcnt_read(nesvnic->netdev));
31792 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
31793 struct nes_cm_core *cm_core;
31794 u8 *start_buff;
31795
31796 - atomic_inc(&cm_rejects);
31797 + atomic_inc_unchecked(&cm_rejects);
31798 cm_node = (struct nes_cm_node *)cm_id->provider_data;
31799 loopback = cm_node->loopbackpartner;
31800 cm_core = cm_node->cm_core;
31801 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31802 ntohl(cm_id->local_addr.sin_addr.s_addr),
31803 ntohs(cm_id->local_addr.sin_port));
31804
31805 - atomic_inc(&cm_connects);
31806 + atomic_inc_unchecked(&cm_connects);
31807 nesqp->active_conn = 1;
31808
31809 /* cache the cm_id in the qp */
31810 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
31811 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
31812 return err;
31813 }
31814 - atomic_inc(&cm_listens_created);
31815 + atomic_inc_unchecked(&cm_listens_created);
31816 }
31817
31818 cm_id->add_ref(cm_id);
31819 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
31820
31821 if (nesqp->destroyed)
31822 return;
31823 - atomic_inc(&cm_connecteds);
31824 + atomic_inc_unchecked(&cm_connecteds);
31825 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31826 " local port 0x%04X. jiffies = %lu.\n",
31827 nesqp->hwqp.qp_id,
31828 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
31829
31830 cm_id->add_ref(cm_id);
31831 ret = cm_id->event_handler(cm_id, &cm_event);
31832 - atomic_inc(&cm_closes);
31833 + atomic_inc_unchecked(&cm_closes);
31834 cm_event.event = IW_CM_EVENT_CLOSE;
31835 cm_event.status = 0;
31836 cm_event.provider_data = cm_id->provider_data;
31837 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
31838 return;
31839 cm_id = cm_node->cm_id;
31840
31841 - atomic_inc(&cm_connect_reqs);
31842 + atomic_inc_unchecked(&cm_connect_reqs);
31843 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31844 cm_node, cm_id, jiffies);
31845
31846 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
31847 return;
31848 cm_id = cm_node->cm_id;
31849
31850 - atomic_inc(&cm_connect_reqs);
31851 + atomic_inc_unchecked(&cm_connect_reqs);
31852 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31853 cm_node, cm_id, jiffies);
31854
31855 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
31856 index b3b2a24..7bfaf1e 100644
31857 --- a/drivers/infiniband/hw/nes/nes_mgt.c
31858 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
31859 @@ -40,8 +40,8 @@
31860 #include "nes.h"
31861 #include "nes_mgt.h"
31862
31863 -atomic_t pau_qps_created;
31864 -atomic_t pau_qps_destroyed;
31865 +atomic_unchecked_t pau_qps_created;
31866 +atomic_unchecked_t pau_qps_destroyed;
31867
31868 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
31869 {
31870 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
31871 {
31872 struct sk_buff *skb;
31873 unsigned long flags;
31874 - atomic_inc(&pau_qps_destroyed);
31875 + atomic_inc_unchecked(&pau_qps_destroyed);
31876
31877 /* Free packets that have not yet been forwarded */
31878 /* Lock is acquired by skb_dequeue when removing the skb */
31879 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
31880 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
31881 skb_queue_head_init(&nesqp->pau_list);
31882 spin_lock_init(&nesqp->pau_lock);
31883 - atomic_inc(&pau_qps_created);
31884 + atomic_inc_unchecked(&pau_qps_created);
31885 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
31886 }
31887
31888 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
31889 index c00d2f3..8834298 100644
31890 --- a/drivers/infiniband/hw/nes/nes_nic.c
31891 +++ b/drivers/infiniband/hw/nes/nes_nic.c
31892 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
31893 target_stat_values[++index] = mh_detected;
31894 target_stat_values[++index] = mh_pauses_sent;
31895 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31896 - target_stat_values[++index] = atomic_read(&cm_connects);
31897 - target_stat_values[++index] = atomic_read(&cm_accepts);
31898 - target_stat_values[++index] = atomic_read(&cm_disconnects);
31899 - target_stat_values[++index] = atomic_read(&cm_connecteds);
31900 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31901 - target_stat_values[++index] = atomic_read(&cm_rejects);
31902 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31903 - target_stat_values[++index] = atomic_read(&qps_created);
31904 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31905 - target_stat_values[++index] = atomic_read(&qps_destroyed);
31906 - target_stat_values[++index] = atomic_read(&cm_closes);
31907 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31908 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31909 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31910 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31911 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31912 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31913 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31914 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31915 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31916 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31917 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31918 target_stat_values[++index] = cm_packets_sent;
31919 target_stat_values[++index] = cm_packets_bounced;
31920 target_stat_values[++index] = cm_packets_created;
31921 target_stat_values[++index] = cm_packets_received;
31922 target_stat_values[++index] = cm_packets_dropped;
31923 target_stat_values[++index] = cm_packets_retrans;
31924 - target_stat_values[++index] = atomic_read(&cm_listens_created);
31925 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
31926 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
31927 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
31928 target_stat_values[++index] = cm_backlog_drops;
31929 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
31930 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
31931 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31932 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31933 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31934 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31935 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31936 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31937 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31938 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31939 target_stat_values[++index] = nesadapter->free_4kpbl;
31940 target_stat_values[++index] = nesadapter->free_256pbl;
31941 target_stat_values[++index] = int_mod_timer_init;
31942 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
31943 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
31944 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
31945 - target_stat_values[++index] = atomic_read(&pau_qps_created);
31946 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
31947 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
31948 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
31949 }
31950
31951 /**
31952 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
31953 index 5095bc4..41e8fff 100644
31954 --- a/drivers/infiniband/hw/nes/nes_verbs.c
31955 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
31956 @@ -46,9 +46,9 @@
31957
31958 #include <rdma/ib_umem.h>
31959
31960 -atomic_t mod_qp_timouts;
31961 -atomic_t qps_created;
31962 -atomic_t sw_qps_destroyed;
31963 +atomic_unchecked_t mod_qp_timouts;
31964 +atomic_unchecked_t qps_created;
31965 +atomic_unchecked_t sw_qps_destroyed;
31966
31967 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31968
31969 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
31970 if (init_attr->create_flags)
31971 return ERR_PTR(-EINVAL);
31972
31973 - atomic_inc(&qps_created);
31974 + atomic_inc_unchecked(&qps_created);
31975 switch (init_attr->qp_type) {
31976 case IB_QPT_RC:
31977 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31978 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
31979 struct iw_cm_event cm_event;
31980 int ret = 0;
31981
31982 - atomic_inc(&sw_qps_destroyed);
31983 + atomic_inc_unchecked(&sw_qps_destroyed);
31984 nesqp->destroyed = 1;
31985
31986 /* Blow away the connection if it exists. */
31987 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
31988 index b881bdc..c2e360c 100644
31989 --- a/drivers/infiniband/hw/qib/qib.h
31990 +++ b/drivers/infiniband/hw/qib/qib.h
31991 @@ -51,6 +51,7 @@
31992 #include <linux/completion.h>
31993 #include <linux/kref.h>
31994 #include <linux/sched.h>
31995 +#include <linux/slab.h>
31996
31997 #include "qib_common.h"
31998 #include "qib_verbs.h"
31999 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32000 index c351aa4..e6967c2 100644
32001 --- a/drivers/input/gameport/gameport.c
32002 +++ b/drivers/input/gameport/gameport.c
32003 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32004 */
32005 static void gameport_init_port(struct gameport *gameport)
32006 {
32007 - static atomic_t gameport_no = ATOMIC_INIT(0);
32008 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32009
32010 __module_get(THIS_MODULE);
32011
32012 mutex_init(&gameport->drv_mutex);
32013 device_initialize(&gameport->dev);
32014 dev_set_name(&gameport->dev, "gameport%lu",
32015 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32016 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32017 gameport->dev.bus = &gameport_bus;
32018 gameport->dev.release = gameport_release_port;
32019 if (gameport->parent)
32020 diff --git a/drivers/input/input.c b/drivers/input/input.c
32021 index da38d97..2aa0b79 100644
32022 --- a/drivers/input/input.c
32023 +++ b/drivers/input/input.c
32024 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32025 */
32026 int input_register_device(struct input_dev *dev)
32027 {
32028 - static atomic_t input_no = ATOMIC_INIT(0);
32029 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32030 struct input_handler *handler;
32031 const char *path;
32032 int error;
32033 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32034 dev->setkeycode = input_default_setkeycode;
32035
32036 dev_set_name(&dev->dev, "input%ld",
32037 - (unsigned long) atomic_inc_return(&input_no) - 1);
32038 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32039
32040 error = device_add(&dev->dev);
32041 if (error)
32042 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32043 index b8d8611..7a4a04b 100644
32044 --- a/drivers/input/joystick/sidewinder.c
32045 +++ b/drivers/input/joystick/sidewinder.c
32046 @@ -30,6 +30,7 @@
32047 #include <linux/kernel.h>
32048 #include <linux/module.h>
32049 #include <linux/slab.h>
32050 +#include <linux/sched.h>
32051 #include <linux/init.h>
32052 #include <linux/input.h>
32053 #include <linux/gameport.h>
32054 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32055 index d728875..844c89b 100644
32056 --- a/drivers/input/joystick/xpad.c
32057 +++ b/drivers/input/joystick/xpad.c
32058 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32059
32060 static int xpad_led_probe(struct usb_xpad *xpad)
32061 {
32062 - static atomic_t led_seq = ATOMIC_INIT(0);
32063 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32064 long led_no;
32065 struct xpad_led *led;
32066 struct led_classdev *led_cdev;
32067 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32068 if (!led)
32069 return -ENOMEM;
32070
32071 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32072 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32073
32074 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32075 led->xpad = xpad;
32076 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32077 index 0110b5a..d3ad144 100644
32078 --- a/drivers/input/mousedev.c
32079 +++ b/drivers/input/mousedev.c
32080 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32081
32082 spin_unlock_irq(&client->packet_lock);
32083
32084 - if (copy_to_user(buffer, data, count))
32085 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32086 return -EFAULT;
32087
32088 return count;
32089 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32090 index ba70058..571d25d 100644
32091 --- a/drivers/input/serio/serio.c
32092 +++ b/drivers/input/serio/serio.c
32093 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
32094 */
32095 static void serio_init_port(struct serio *serio)
32096 {
32097 - static atomic_t serio_no = ATOMIC_INIT(0);
32098 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32099
32100 __module_get(THIS_MODULE);
32101
32102 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
32103 mutex_init(&serio->drv_mutex);
32104 device_initialize(&serio->dev);
32105 dev_set_name(&serio->dev, "serio%ld",
32106 - (long)atomic_inc_return(&serio_no) - 1);
32107 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32108 serio->dev.bus = &serio_bus;
32109 serio->dev.release = serio_release_port;
32110 serio->dev.groups = serio_device_attr_groups;
32111 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32112 index e44933d..9ba484a 100644
32113 --- a/drivers/isdn/capi/capi.c
32114 +++ b/drivers/isdn/capi/capi.c
32115 @@ -83,8 +83,8 @@ struct capiminor {
32116
32117 struct capi20_appl *ap;
32118 u32 ncci;
32119 - atomic_t datahandle;
32120 - atomic_t msgid;
32121 + atomic_unchecked_t datahandle;
32122 + atomic_unchecked_t msgid;
32123
32124 struct tty_port port;
32125 int ttyinstop;
32126 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32127 capimsg_setu16(s, 2, mp->ap->applid);
32128 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32129 capimsg_setu8 (s, 5, CAPI_RESP);
32130 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32131 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32132 capimsg_setu32(s, 8, mp->ncci);
32133 capimsg_setu16(s, 12, datahandle);
32134 }
32135 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32136 mp->outbytes -= len;
32137 spin_unlock_bh(&mp->outlock);
32138
32139 - datahandle = atomic_inc_return(&mp->datahandle);
32140 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32141 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32142 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32143 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32144 capimsg_setu16(skb->data, 2, mp->ap->applid);
32145 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32146 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32147 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32148 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32149 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32150 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32151 capimsg_setu16(skb->data, 16, len); /* Data length */
32152 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
32153 index db621db..825ea1a 100644
32154 --- a/drivers/isdn/gigaset/common.c
32155 +++ b/drivers/isdn/gigaset/common.c
32156 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
32157 cs->commands_pending = 0;
32158 cs->cur_at_seq = 0;
32159 cs->gotfwver = -1;
32160 - cs->open_count = 0;
32161 + local_set(&cs->open_count, 0);
32162 cs->dev = NULL;
32163 cs->tty = NULL;
32164 cs->tty_dev = NULL;
32165 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
32166 index 212efaf..f187c6b 100644
32167 --- a/drivers/isdn/gigaset/gigaset.h
32168 +++ b/drivers/isdn/gigaset/gigaset.h
32169 @@ -35,6 +35,7 @@
32170 #include <linux/tty_driver.h>
32171 #include <linux/list.h>
32172 #include <linux/atomic.h>
32173 +#include <asm/local.h>
32174
32175 #define GIG_VERSION {0, 5, 0, 0}
32176 #define GIG_COMPAT {0, 4, 0, 0}
32177 @@ -433,7 +434,7 @@ struct cardstate {
32178 spinlock_t cmdlock;
32179 unsigned curlen, cmdbytes;
32180
32181 - unsigned open_count;
32182 + local_t open_count;
32183 struct tty_struct *tty;
32184 struct tasklet_struct if_wake_tasklet;
32185 unsigned control_state;
32186 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
32187 index ee0a549..a7c9798 100644
32188 --- a/drivers/isdn/gigaset/interface.c
32189 +++ b/drivers/isdn/gigaset/interface.c
32190 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
32191 }
32192 tty->driver_data = cs;
32193
32194 - ++cs->open_count;
32195 -
32196 - if (cs->open_count == 1) {
32197 + if (local_inc_return(&cs->open_count) == 1) {
32198 spin_lock_irqsave(&cs->lock, flags);
32199 cs->tty = tty;
32200 spin_unlock_irqrestore(&cs->lock, flags);
32201 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
32202
32203 if (!cs->connected)
32204 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32205 - else if (!cs->open_count)
32206 + else if (!local_read(&cs->open_count))
32207 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32208 else {
32209 - if (!--cs->open_count) {
32210 + if (!local_dec_return(&cs->open_count)) {
32211 spin_lock_irqsave(&cs->lock, flags);
32212 cs->tty = NULL;
32213 spin_unlock_irqrestore(&cs->lock, flags);
32214 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
32215 if (!cs->connected) {
32216 gig_dbg(DEBUG_IF, "not connected");
32217 retval = -ENODEV;
32218 - } else if (!cs->open_count)
32219 + } else if (!local_read(&cs->open_count))
32220 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32221 else {
32222 retval = 0;
32223 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
32224 retval = -ENODEV;
32225 goto done;
32226 }
32227 - if (!cs->open_count) {
32228 + if (!local_read(&cs->open_count)) {
32229 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32230 retval = -ENODEV;
32231 goto done;
32232 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
32233 if (!cs->connected) {
32234 gig_dbg(DEBUG_IF, "not connected");
32235 retval = -ENODEV;
32236 - } else if (!cs->open_count)
32237 + } else if (!local_read(&cs->open_count))
32238 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32239 else if (cs->mstate != MS_LOCKED) {
32240 dev_warn(cs->dev, "can't write to unlocked device\n");
32241 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
32242
32243 if (!cs->connected)
32244 gig_dbg(DEBUG_IF, "not connected");
32245 - else if (!cs->open_count)
32246 + else if (!local_read(&cs->open_count))
32247 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32248 else if (cs->mstate != MS_LOCKED)
32249 dev_warn(cs->dev, "can't write to unlocked device\n");
32250 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
32251
32252 if (!cs->connected)
32253 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32254 - else if (!cs->open_count)
32255 + else if (!local_read(&cs->open_count))
32256 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32257 else
32258 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32259 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
32260
32261 if (!cs->connected)
32262 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32263 - else if (!cs->open_count)
32264 + else if (!local_read(&cs->open_count))
32265 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32266 else
32267 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32268 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
32269 goto out;
32270 }
32271
32272 - if (!cs->open_count) {
32273 + if (!local_read(&cs->open_count)) {
32274 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32275 goto out;
32276 }
32277 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32278 index 2a57da59..e7a12ed 100644
32279 --- a/drivers/isdn/hardware/avm/b1.c
32280 +++ b/drivers/isdn/hardware/avm/b1.c
32281 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
32282 }
32283 if (left) {
32284 if (t4file->user) {
32285 - if (copy_from_user(buf, dp, left))
32286 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32287 return -EFAULT;
32288 } else {
32289 memcpy(buf, dp, left);
32290 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
32291 }
32292 if (left) {
32293 if (config->user) {
32294 - if (copy_from_user(buf, dp, left))
32295 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32296 return -EFAULT;
32297 } else {
32298 memcpy(buf, dp, left);
32299 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32300 index 85784a7..a19ca98 100644
32301 --- a/drivers/isdn/hardware/eicon/divasync.h
32302 +++ b/drivers/isdn/hardware/eicon/divasync.h
32303 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32304 } diva_didd_add_adapter_t;
32305 typedef struct _diva_didd_remove_adapter {
32306 IDI_CALL p_request;
32307 -} diva_didd_remove_adapter_t;
32308 +} __no_const diva_didd_remove_adapter_t;
32309 typedef struct _diva_didd_read_adapter_array {
32310 void * buffer;
32311 dword length;
32312 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32313 index a3bd163..8956575 100644
32314 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32315 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32316 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32317 typedef struct _diva_os_idi_adapter_interface {
32318 diva_init_card_proc_t cleanup_adapter_proc;
32319 diva_cmd_card_proc_t cmd_proc;
32320 -} diva_os_idi_adapter_interface_t;
32321 +} __no_const diva_os_idi_adapter_interface_t;
32322
32323 typedef struct _diva_os_xdi_adapter {
32324 struct list_head link;
32325 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
32326 index 2339d73..802ab87 100644
32327 --- a/drivers/isdn/i4l/isdn_net.c
32328 +++ b/drivers/isdn/i4l/isdn_net.c
32329 @@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
32330 {
32331 isdn_net_local *lp = netdev_priv(dev);
32332 unsigned char *p;
32333 - ushort len = 0;
32334 + int len = 0;
32335
32336 switch (lp->p_encap) {
32337 case ISDN_NET_ENCAP_ETHER:
32338 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32339 index 1f355bb..43f1fea 100644
32340 --- a/drivers/isdn/icn/icn.c
32341 +++ b/drivers/isdn/icn/icn.c
32342 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
32343 if (count > len)
32344 count = len;
32345 if (user) {
32346 - if (copy_from_user(msg, buf, count))
32347 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32348 return -EFAULT;
32349 } else
32350 memcpy(msg, buf, count);
32351 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32352 index b5fdcb7..5b6c59f 100644
32353 --- a/drivers/lguest/core.c
32354 +++ b/drivers/lguest/core.c
32355 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32356 * it's worked so far. The end address needs +1 because __get_vm_area
32357 * allocates an extra guard page, so we need space for that.
32358 */
32359 +
32360 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32361 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32362 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32363 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32364 +#else
32365 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32366 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32367 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32368 +#endif
32369 +
32370 if (!switcher_vma) {
32371 err = -ENOMEM;
32372 printk("lguest: could not map switcher pages high\n");
32373 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32374 * Now the Switcher is mapped at the right address, we can't fail!
32375 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32376 */
32377 - memcpy(switcher_vma->addr, start_switcher_text,
32378 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32379 end_switcher_text - start_switcher_text);
32380
32381 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32382 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32383 index 65af42f..530c87a 100644
32384 --- a/drivers/lguest/x86/core.c
32385 +++ b/drivers/lguest/x86/core.c
32386 @@ -59,7 +59,7 @@ static struct {
32387 /* Offset from where switcher.S was compiled to where we've copied it */
32388 static unsigned long switcher_offset(void)
32389 {
32390 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32391 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32392 }
32393
32394 /* This cpu's struct lguest_pages. */
32395 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32396 * These copies are pretty cheap, so we do them unconditionally: */
32397 /* Save the current Host top-level page directory.
32398 */
32399 +
32400 +#ifdef CONFIG_PAX_PER_CPU_PGD
32401 + pages->state.host_cr3 = read_cr3();
32402 +#else
32403 pages->state.host_cr3 = __pa(current->mm->pgd);
32404 +#endif
32405 +
32406 /*
32407 * Set up the Guest's page tables to see this CPU's pages (and no
32408 * other CPU's pages).
32409 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32410 * compiled-in switcher code and the high-mapped copy we just made.
32411 */
32412 for (i = 0; i < IDT_ENTRIES; i++)
32413 - default_idt_entries[i] += switcher_offset();
32414 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32415
32416 /*
32417 * Set up the Switcher's per-cpu areas.
32418 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32419 * it will be undisturbed when we switch. To change %cs and jump we
32420 * need this structure to feed to Intel's "lcall" instruction.
32421 */
32422 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32423 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32424 lguest_entry.segment = LGUEST_CS;
32425
32426 /*
32427 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32428 index 40634b0..4f5855e 100644
32429 --- a/drivers/lguest/x86/switcher_32.S
32430 +++ b/drivers/lguest/x86/switcher_32.S
32431 @@ -87,6 +87,7 @@
32432 #include <asm/page.h>
32433 #include <asm/segment.h>
32434 #include <asm/lguest.h>
32435 +#include <asm/processor-flags.h>
32436
32437 // We mark the start of the code to copy
32438 // It's placed in .text tho it's never run here
32439 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32440 // Changes type when we load it: damn Intel!
32441 // For after we switch over our page tables
32442 // That entry will be read-only: we'd crash.
32443 +
32444 +#ifdef CONFIG_PAX_KERNEXEC
32445 + mov %cr0, %edx
32446 + xor $X86_CR0_WP, %edx
32447 + mov %edx, %cr0
32448 +#endif
32449 +
32450 movl $(GDT_ENTRY_TSS*8), %edx
32451 ltr %dx
32452
32453 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32454 // Let's clear it again for our return.
32455 // The GDT descriptor of the Host
32456 // Points to the table after two "size" bytes
32457 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32458 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32459 // Clear "used" from type field (byte 5, bit 2)
32460 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32461 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32462 +
32463 +#ifdef CONFIG_PAX_KERNEXEC
32464 + mov %cr0, %eax
32465 + xor $X86_CR0_WP, %eax
32466 + mov %eax, %cr0
32467 +#endif
32468
32469 // Once our page table's switched, the Guest is live!
32470 // The Host fades as we run this final step.
32471 @@ -295,13 +309,12 @@ deliver_to_host:
32472 // I consulted gcc, and it gave
32473 // These instructions, which I gladly credit:
32474 leal (%edx,%ebx,8), %eax
32475 - movzwl (%eax),%edx
32476 - movl 4(%eax), %eax
32477 - xorw %ax, %ax
32478 - orl %eax, %edx
32479 + movl 4(%eax), %edx
32480 + movw (%eax), %dx
32481 // Now the address of the handler's in %edx
32482 // We call it now: its "iret" drops us home.
32483 - jmp *%edx
32484 + ljmp $__KERNEL_CS, $1f
32485 +1: jmp *%edx
32486
32487 // Every interrupt can come to us here
32488 // But we must truly tell each apart.
32489 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32490 index 4daf9e5..b8d1d0f 100644
32491 --- a/drivers/macintosh/macio_asic.c
32492 +++ b/drivers/macintosh/macio_asic.c
32493 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32494 * MacIO is matched against any Apple ID, it's probe() function
32495 * will then decide wether it applies or not
32496 */
32497 -static const struct pci_device_id __devinitdata pci_ids [] = { {
32498 +static const struct pci_device_id __devinitconst pci_ids [] = { {
32499 .vendor = PCI_VENDOR_ID_APPLE,
32500 .device = PCI_ANY_ID,
32501 .subvendor = PCI_ANY_ID,
32502 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
32503 index 1ce84ed..0fdd40a 100644
32504 --- a/drivers/md/dm-ioctl.c
32505 +++ b/drivers/md/dm-ioctl.c
32506 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
32507 cmd == DM_LIST_VERSIONS_CMD)
32508 return 0;
32509
32510 - if ((cmd == DM_DEV_CREATE_CMD)) {
32511 + if (cmd == DM_DEV_CREATE_CMD) {
32512 if (!*param->name) {
32513 DMWARN("name not supplied when creating device");
32514 return -EINVAL;
32515 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
32516 index 9bfd057..01180bc 100644
32517 --- a/drivers/md/dm-raid1.c
32518 +++ b/drivers/md/dm-raid1.c
32519 @@ -40,7 +40,7 @@ enum dm_raid1_error {
32520
32521 struct mirror {
32522 struct mirror_set *ms;
32523 - atomic_t error_count;
32524 + atomic_unchecked_t error_count;
32525 unsigned long error_type;
32526 struct dm_dev *dev;
32527 sector_t offset;
32528 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
32529 struct mirror *m;
32530
32531 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
32532 - if (!atomic_read(&m->error_count))
32533 + if (!atomic_read_unchecked(&m->error_count))
32534 return m;
32535
32536 return NULL;
32537 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
32538 * simple way to tell if a device has encountered
32539 * errors.
32540 */
32541 - atomic_inc(&m->error_count);
32542 + atomic_inc_unchecked(&m->error_count);
32543
32544 if (test_and_set_bit(error_type, &m->error_type))
32545 return;
32546 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
32547 struct mirror *m = get_default_mirror(ms);
32548
32549 do {
32550 - if (likely(!atomic_read(&m->error_count)))
32551 + if (likely(!atomic_read_unchecked(&m->error_count)))
32552 return m;
32553
32554 if (m-- == ms->mirror)
32555 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
32556 {
32557 struct mirror *default_mirror = get_default_mirror(m->ms);
32558
32559 - return !atomic_read(&default_mirror->error_count);
32560 + return !atomic_read_unchecked(&default_mirror->error_count);
32561 }
32562
32563 static int mirror_available(struct mirror_set *ms, struct bio *bio)
32564 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
32565 */
32566 if (likely(region_in_sync(ms, region, 1)))
32567 m = choose_mirror(ms, bio->bi_sector);
32568 - else if (m && atomic_read(&m->error_count))
32569 + else if (m && atomic_read_unchecked(&m->error_count))
32570 m = NULL;
32571
32572 if (likely(m))
32573 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
32574 }
32575
32576 ms->mirror[mirror].ms = ms;
32577 - atomic_set(&(ms->mirror[mirror].error_count), 0);
32578 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
32579 ms->mirror[mirror].error_type = 0;
32580 ms->mirror[mirror].offset = offset;
32581
32582 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
32583 */
32584 static char device_status_char(struct mirror *m)
32585 {
32586 - if (!atomic_read(&(m->error_count)))
32587 + if (!atomic_read_unchecked(&(m->error_count)))
32588 return 'A';
32589
32590 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
32591 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
32592 index 3d80cf0..b77cc47 100644
32593 --- a/drivers/md/dm-stripe.c
32594 +++ b/drivers/md/dm-stripe.c
32595 @@ -20,7 +20,7 @@ struct stripe {
32596 struct dm_dev *dev;
32597 sector_t physical_start;
32598
32599 - atomic_t error_count;
32600 + atomic_unchecked_t error_count;
32601 };
32602
32603 struct stripe_c {
32604 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
32605 kfree(sc);
32606 return r;
32607 }
32608 - atomic_set(&(sc->stripe[i].error_count), 0);
32609 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
32610 }
32611
32612 ti->private = sc;
32613 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
32614 DMEMIT("%d ", sc->stripes);
32615 for (i = 0; i < sc->stripes; i++) {
32616 DMEMIT("%s ", sc->stripe[i].dev->name);
32617 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
32618 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
32619 'D' : 'A';
32620 }
32621 buffer[i] = '\0';
32622 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
32623 */
32624 for (i = 0; i < sc->stripes; i++)
32625 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
32626 - atomic_inc(&(sc->stripe[i].error_count));
32627 - if (atomic_read(&(sc->stripe[i].error_count)) <
32628 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
32629 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32630 DM_IO_ERROR_THRESHOLD)
32631 schedule_work(&sc->trigger_event);
32632 }
32633 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
32634 index 8e91321..fd17aef 100644
32635 --- a/drivers/md/dm-table.c
32636 +++ b/drivers/md/dm-table.c
32637 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
32638 if (!dev_size)
32639 return 0;
32640
32641 - if ((start >= dev_size) || (start + len > dev_size)) {
32642 + if ((start >= dev_size) || (len > dev_size - start)) {
32643 DMWARN("%s: %s too small for target: "
32644 "start=%llu, len=%llu, dev_size=%llu",
32645 dm_device_name(ti->table->md), bdevname(bdev, b),
32646 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
32647 index 237571a..fb6d19b 100644
32648 --- a/drivers/md/dm-thin-metadata.c
32649 +++ b/drivers/md/dm-thin-metadata.c
32650 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32651
32652 pmd->info.tm = tm;
32653 pmd->info.levels = 2;
32654 - pmd->info.value_type.context = pmd->data_sm;
32655 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32656 pmd->info.value_type.size = sizeof(__le64);
32657 pmd->info.value_type.inc = data_block_inc;
32658 pmd->info.value_type.dec = data_block_dec;
32659 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32660
32661 pmd->bl_info.tm = tm;
32662 pmd->bl_info.levels = 1;
32663 - pmd->bl_info.value_type.context = pmd->data_sm;
32664 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32665 pmd->bl_info.value_type.size = sizeof(__le64);
32666 pmd->bl_info.value_type.inc = data_block_inc;
32667 pmd->bl_info.value_type.dec = data_block_dec;
32668 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
32669 index 4720f68..78d1df7 100644
32670 --- a/drivers/md/dm.c
32671 +++ b/drivers/md/dm.c
32672 @@ -177,9 +177,9 @@ struct mapped_device {
32673 /*
32674 * Event handling.
32675 */
32676 - atomic_t event_nr;
32677 + atomic_unchecked_t event_nr;
32678 wait_queue_head_t eventq;
32679 - atomic_t uevent_seq;
32680 + atomic_unchecked_t uevent_seq;
32681 struct list_head uevent_list;
32682 spinlock_t uevent_lock; /* Protect access to uevent_list */
32683
32684 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
32685 rwlock_init(&md->map_lock);
32686 atomic_set(&md->holders, 1);
32687 atomic_set(&md->open_count, 0);
32688 - atomic_set(&md->event_nr, 0);
32689 - atomic_set(&md->uevent_seq, 0);
32690 + atomic_set_unchecked(&md->event_nr, 0);
32691 + atomic_set_unchecked(&md->uevent_seq, 0);
32692 INIT_LIST_HEAD(&md->uevent_list);
32693 spin_lock_init(&md->uevent_lock);
32694
32695 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
32696
32697 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
32698
32699 - atomic_inc(&md->event_nr);
32700 + atomic_inc_unchecked(&md->event_nr);
32701 wake_up(&md->eventq);
32702 }
32703
32704 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
32705
32706 uint32_t dm_next_uevent_seq(struct mapped_device *md)
32707 {
32708 - return atomic_add_return(1, &md->uevent_seq);
32709 + return atomic_add_return_unchecked(1, &md->uevent_seq);
32710 }
32711
32712 uint32_t dm_get_event_nr(struct mapped_device *md)
32713 {
32714 - return atomic_read(&md->event_nr);
32715 + return atomic_read_unchecked(&md->event_nr);
32716 }
32717
32718 int dm_wait_event(struct mapped_device *md, int event_nr)
32719 {
32720 return wait_event_interruptible(md->eventq,
32721 - (event_nr != atomic_read(&md->event_nr)));
32722 + (event_nr != atomic_read_unchecked(&md->event_nr)));
32723 }
32724
32725 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
32726 diff --git a/drivers/md/md.c b/drivers/md/md.c
32727 index f47f1f8..b7f559e 100644
32728 --- a/drivers/md/md.c
32729 +++ b/drivers/md/md.c
32730 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
32731 * start build, activate spare
32732 */
32733 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32734 -static atomic_t md_event_count;
32735 +static atomic_unchecked_t md_event_count;
32736 void md_new_event(struct mddev *mddev)
32737 {
32738 - atomic_inc(&md_event_count);
32739 + atomic_inc_unchecked(&md_event_count);
32740 wake_up(&md_event_waiters);
32741 }
32742 EXPORT_SYMBOL_GPL(md_new_event);
32743 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32744 */
32745 static void md_new_event_inintr(struct mddev *mddev)
32746 {
32747 - atomic_inc(&md_event_count);
32748 + atomic_inc_unchecked(&md_event_count);
32749 wake_up(&md_event_waiters);
32750 }
32751
32752 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
32753
32754 rdev->preferred_minor = 0xffff;
32755 rdev->data_offset = le64_to_cpu(sb->data_offset);
32756 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32757 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32758
32759 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32760 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32761 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
32762 else
32763 sb->resync_offset = cpu_to_le64(0);
32764
32765 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32766 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32767
32768 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32769 sb->size = cpu_to_le64(mddev->dev_sectors);
32770 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
32771 static ssize_t
32772 errors_show(struct md_rdev *rdev, char *page)
32773 {
32774 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32775 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32776 }
32777
32778 static ssize_t
32779 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
32780 char *e;
32781 unsigned long n = simple_strtoul(buf, &e, 10);
32782 if (*buf && (*e == 0 || *e == '\n')) {
32783 - atomic_set(&rdev->corrected_errors, n);
32784 + atomic_set_unchecked(&rdev->corrected_errors, n);
32785 return len;
32786 }
32787 return -EINVAL;
32788 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
32789 rdev->sb_loaded = 0;
32790 rdev->bb_page = NULL;
32791 atomic_set(&rdev->nr_pending, 0);
32792 - atomic_set(&rdev->read_errors, 0);
32793 - atomic_set(&rdev->corrected_errors, 0);
32794 + atomic_set_unchecked(&rdev->read_errors, 0);
32795 + atomic_set_unchecked(&rdev->corrected_errors, 0);
32796
32797 INIT_LIST_HEAD(&rdev->same_set);
32798 init_waitqueue_head(&rdev->blocked_wait);
32799 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32800
32801 spin_unlock(&pers_lock);
32802 seq_printf(seq, "\n");
32803 - seq->poll_event = atomic_read(&md_event_count);
32804 + seq->poll_event = atomic_read_unchecked(&md_event_count);
32805 return 0;
32806 }
32807 if (v == (void*)2) {
32808 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32809 chunk_kb ? "KB" : "B");
32810 if (bitmap->file) {
32811 seq_printf(seq, ", file: ");
32812 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32813 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32814 }
32815
32816 seq_printf(seq, "\n");
32817 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
32818 return error;
32819
32820 seq = file->private_data;
32821 - seq->poll_event = atomic_read(&md_event_count);
32822 + seq->poll_event = atomic_read_unchecked(&md_event_count);
32823 return error;
32824 }
32825
32826 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
32827 /* always allow read */
32828 mask = POLLIN | POLLRDNORM;
32829
32830 - if (seq->poll_event != atomic_read(&md_event_count))
32831 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
32832 mask |= POLLERR | POLLPRI;
32833 return mask;
32834 }
32835 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
32836 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32837 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32838 (int)part_stat_read(&disk->part0, sectors[1]) -
32839 - atomic_read(&disk->sync_io);
32840 + atomic_read_unchecked(&disk->sync_io);
32841 /* sync IO will cause sync_io to increase before the disk_stats
32842 * as sync_io is counted when a request starts, and
32843 * disk_stats is counted when it completes.
32844 diff --git a/drivers/md/md.h b/drivers/md/md.h
32845 index cf742d9..7c7c745 100644
32846 --- a/drivers/md/md.h
32847 +++ b/drivers/md/md.h
32848 @@ -120,13 +120,13 @@ struct md_rdev {
32849 * only maintained for arrays that
32850 * support hot removal
32851 */
32852 - atomic_t read_errors; /* number of consecutive read errors that
32853 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
32854 * we have tried to ignore.
32855 */
32856 struct timespec last_read_error; /* monotonic time since our
32857 * last read error
32858 */
32859 - atomic_t corrected_errors; /* number of corrected read errors,
32860 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32861 * for reporting to userspace and storing
32862 * in superblock.
32863 */
32864 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
32865
32866 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32867 {
32868 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32869 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32870 }
32871
32872 struct md_personality
32873 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
32874 index 50ed53b..4f29d7d 100644
32875 --- a/drivers/md/persistent-data/dm-space-map-checker.c
32876 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
32877 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
32878 /*----------------------------------------------------------------*/
32879
32880 struct sm_checker {
32881 - struct dm_space_map sm;
32882 + dm_space_map_no_const sm;
32883
32884 struct count_array old_counts;
32885 struct count_array counts;
32886 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
32887 index fc469ba..2d91555 100644
32888 --- a/drivers/md/persistent-data/dm-space-map-disk.c
32889 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
32890 @@ -23,7 +23,7 @@
32891 * Space map interface.
32892 */
32893 struct sm_disk {
32894 - struct dm_space_map sm;
32895 + dm_space_map_no_const sm;
32896
32897 struct ll_disk ll;
32898 struct ll_disk old_ll;
32899 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
32900 index e89ae5e..062e4c2 100644
32901 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
32902 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
32903 @@ -43,7 +43,7 @@ struct block_op {
32904 };
32905
32906 struct sm_metadata {
32907 - struct dm_space_map sm;
32908 + dm_space_map_no_const sm;
32909
32910 struct ll_disk ll;
32911 struct ll_disk old_ll;
32912 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
32913 index 1cbfc6b..56e1dbb 100644
32914 --- a/drivers/md/persistent-data/dm-space-map.h
32915 +++ b/drivers/md/persistent-data/dm-space-map.h
32916 @@ -60,6 +60,7 @@ struct dm_space_map {
32917 int (*root_size)(struct dm_space_map *sm, size_t *result);
32918 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
32919 };
32920 +typedef struct dm_space_map __no_const dm_space_map_no_const;
32921
32922 /*----------------------------------------------------------------*/
32923
32924 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
32925 index 7d9e071..015b1d5 100644
32926 --- a/drivers/md/raid1.c
32927 +++ b/drivers/md/raid1.c
32928 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
32929 if (r1_sync_page_io(rdev, sect, s,
32930 bio->bi_io_vec[idx].bv_page,
32931 READ) != 0)
32932 - atomic_add(s, &rdev->corrected_errors);
32933 + atomic_add_unchecked(s, &rdev->corrected_errors);
32934 }
32935 sectors -= s;
32936 sect += s;
32937 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
32938 test_bit(In_sync, &rdev->flags)) {
32939 if (r1_sync_page_io(rdev, sect, s,
32940 conf->tmppage, READ)) {
32941 - atomic_add(s, &rdev->corrected_errors);
32942 + atomic_add_unchecked(s, &rdev->corrected_errors);
32943 printk(KERN_INFO
32944 "md/raid1:%s: read error corrected "
32945 "(%d sectors at %llu on %s)\n",
32946 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
32947 index 685ddf3..955b087 100644
32948 --- a/drivers/md/raid10.c
32949 +++ b/drivers/md/raid10.c
32950 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
32951 /* The write handler will notice the lack of
32952 * R10BIO_Uptodate and record any errors etc
32953 */
32954 - atomic_add(r10_bio->sectors,
32955 + atomic_add_unchecked(r10_bio->sectors,
32956 &conf->mirrors[d].rdev->corrected_errors);
32957
32958 /* for reconstruct, we always reschedule after a read.
32959 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32960 {
32961 struct timespec cur_time_mon;
32962 unsigned long hours_since_last;
32963 - unsigned int read_errors = atomic_read(&rdev->read_errors);
32964 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
32965
32966 ktime_get_ts(&cur_time_mon);
32967
32968 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32969 * overflowing the shift of read_errors by hours_since_last.
32970 */
32971 if (hours_since_last >= 8 * sizeof(read_errors))
32972 - atomic_set(&rdev->read_errors, 0);
32973 + atomic_set_unchecked(&rdev->read_errors, 0);
32974 else
32975 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
32976 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
32977 }
32978
32979 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
32980 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32981 return;
32982
32983 check_decay_read_errors(mddev, rdev);
32984 - atomic_inc(&rdev->read_errors);
32985 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
32986 + atomic_inc_unchecked(&rdev->read_errors);
32987 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
32988 char b[BDEVNAME_SIZE];
32989 bdevname(rdev->bdev, b);
32990
32991 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32992 "md/raid10:%s: %s: Raid device exceeded "
32993 "read_error threshold [cur %d:max %d]\n",
32994 mdname(mddev), b,
32995 - atomic_read(&rdev->read_errors), max_read_errors);
32996 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
32997 printk(KERN_NOTICE
32998 "md/raid10:%s: %s: Failing raid device\n",
32999 mdname(mddev), b);
33000 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33001 (unsigned long long)(
33002 sect + rdev->data_offset),
33003 bdevname(rdev->bdev, b));
33004 - atomic_add(s, &rdev->corrected_errors);
33005 + atomic_add_unchecked(s, &rdev->corrected_errors);
33006 }
33007
33008 rdev_dec_pending(rdev, mddev);
33009 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33010 index 858fdbb..b2dac95 100644
33011 --- a/drivers/md/raid5.c
33012 +++ b/drivers/md/raid5.c
33013 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
33014 (unsigned long long)(sh->sector
33015 + rdev->data_offset),
33016 bdevname(rdev->bdev, b));
33017 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33018 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33019 clear_bit(R5_ReadError, &sh->dev[i].flags);
33020 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33021 }
33022 - if (atomic_read(&conf->disks[i].rdev->read_errors))
33023 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
33024 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
33025 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
33026 } else {
33027 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
33028 int retry = 0;
33029 rdev = conf->disks[i].rdev;
33030
33031 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33032 - atomic_inc(&rdev->read_errors);
33033 + atomic_inc_unchecked(&rdev->read_errors);
33034 if (conf->mddev->degraded >= conf->max_degraded)
33035 printk_ratelimited(
33036 KERN_WARNING
33037 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33038 (unsigned long long)(sh->sector
33039 + rdev->data_offset),
33040 bdn);
33041 - else if (atomic_read(&rdev->read_errors)
33042 + else if (atomic_read_unchecked(&rdev->read_errors)
33043 > conf->max_nr_stripes)
33044 printk(KERN_WARNING
33045 "md/raid:%s: Too many read errors, failing device %s.\n",
33046 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33047 index ba9a643..e474ab5 100644
33048 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33049 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33050 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33051 .subvendor = _subvend, .subdevice = _subdev, \
33052 .driver_data = (unsigned long)&_driverdata }
33053
33054 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33055 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33056 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33057 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33058 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33059 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33060 index a7d876f..8c21b61 100644
33061 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33062 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33063 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33064 union {
33065 dmx_ts_cb ts;
33066 dmx_section_cb sec;
33067 - } cb;
33068 + } __no_const cb;
33069
33070 struct dvb_demux *demux;
33071 void *priv;
33072 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33073 index f732877..d38c35a 100644
33074 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33075 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33076 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33077 const struct dvb_device *template, void *priv, int type)
33078 {
33079 struct dvb_device *dvbdev;
33080 - struct file_operations *dvbdevfops;
33081 + file_operations_no_const *dvbdevfops;
33082 struct device *clsdev;
33083 int minor;
33084 int id;
33085 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33086 index 9f2a02c..5920f88 100644
33087 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33088 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33089 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33090 struct dib0700_adapter_state {
33091 int (*set_param_save) (struct dvb_frontend *,
33092 struct dvb_frontend_parameters *);
33093 -};
33094 +} __no_const;
33095
33096 static int dib7070_set_param_override(struct dvb_frontend *fe,
33097 struct dvb_frontend_parameters *fep)
33098 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33099 index f103ec1..5e8968b 100644
33100 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33101 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33102 @@ -95,7 +95,7 @@ struct su3000_state {
33103
33104 struct s6x0_state {
33105 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33106 -};
33107 +} __no_const;
33108
33109 /* debug */
33110 static int dvb_usb_dw2102_debug;
33111 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33112 index 404f63a..4796533 100644
33113 --- a/drivers/media/dvb/frontends/dib3000.h
33114 +++ b/drivers/media/dvb/frontends/dib3000.h
33115 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33116 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33117 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33118 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33119 -};
33120 +} __no_const;
33121
33122 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33123 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33124 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
33125 index 90bf573..e8463da 100644
33126 --- a/drivers/media/dvb/frontends/ds3000.c
33127 +++ b/drivers/media/dvb/frontends/ds3000.c
33128 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
33129
33130 for (i = 0; i < 30 ; i++) {
33131 ds3000_read_status(fe, &status);
33132 - if (status && FE_HAS_LOCK)
33133 + if (status & FE_HAS_LOCK)
33134 break;
33135
33136 msleep(10);
33137 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33138 index 0564192..75b16f5 100644
33139 --- a/drivers/media/dvb/ngene/ngene-cards.c
33140 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33141 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
33142
33143 /****************************************************************************/
33144
33145 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33146 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33147 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33148 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33149 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33150 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33151 index 16a089f..ab1667d 100644
33152 --- a/drivers/media/radio/radio-cadet.c
33153 +++ b/drivers/media/radio/radio-cadet.c
33154 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33155 unsigned char readbuf[RDS_BUFFER];
33156 int i = 0;
33157
33158 + if (count > RDS_BUFFER)
33159 + return -EFAULT;
33160 mutex_lock(&dev->lock);
33161 if (dev->rdsstat == 0) {
33162 dev->rdsstat = 1;
33163 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
33164 index 61287fc..8b08712 100644
33165 --- a/drivers/media/rc/redrat3.c
33166 +++ b/drivers/media/rc/redrat3.c
33167 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
33168 return carrier;
33169 }
33170
33171 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
33172 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
33173 {
33174 struct redrat3_dev *rr3 = rcdev->priv;
33175 struct device *dev = rr3->dev;
33176 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33177 index 9cde353..8c6a1c3 100644
33178 --- a/drivers/media/video/au0828/au0828.h
33179 +++ b/drivers/media/video/au0828/au0828.h
33180 @@ -191,7 +191,7 @@ struct au0828_dev {
33181
33182 /* I2C */
33183 struct i2c_adapter i2c_adap;
33184 - struct i2c_algorithm i2c_algo;
33185 + i2c_algorithm_no_const i2c_algo;
33186 struct i2c_client i2c_client;
33187 u32 i2c_rc;
33188
33189 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33190 index 68d1240..46b32eb 100644
33191 --- a/drivers/media/video/cx88/cx88-alsa.c
33192 +++ b/drivers/media/video/cx88/cx88-alsa.c
33193 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33194 * Only boards with eeprom and byte 1 at eeprom=1 have it
33195 */
33196
33197 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33198 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33199 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33200 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33201 {0, }
33202 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33203 index 305e6aa..0143317 100644
33204 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33205 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33206 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33207
33208 /* I2C stuff */
33209 struct i2c_adapter i2c_adap;
33210 - struct i2c_algorithm i2c_algo;
33211 + i2c_algorithm_no_const i2c_algo;
33212 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33213 int i2c_cx25840_hack_state;
33214 int i2c_linked;
33215 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33216 index a0895bf..b7ebb1b 100644
33217 --- a/drivers/media/video/timblogiw.c
33218 +++ b/drivers/media/video/timblogiw.c
33219 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33220
33221 /* Platform device functions */
33222
33223 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33224 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33225 .vidioc_querycap = timblogiw_querycap,
33226 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33227 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33228 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33229 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33230 };
33231
33232 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33233 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33234 .owner = THIS_MODULE,
33235 .open = timblogiw_open,
33236 .release = timblogiw_close,
33237 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33238 index e9c6a60..daf6a33 100644
33239 --- a/drivers/message/fusion/mptbase.c
33240 +++ b/drivers/message/fusion/mptbase.c
33241 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33242 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33243 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33244
33245 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33246 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33247 +#else
33248 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33249 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33250 +#endif
33251 +
33252 /*
33253 * Rounding UP to nearest 4-kB boundary here...
33254 */
33255 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33256 index 9d95042..b808101 100644
33257 --- a/drivers/message/fusion/mptsas.c
33258 +++ b/drivers/message/fusion/mptsas.c
33259 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33260 return 0;
33261 }
33262
33263 +static inline void
33264 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33265 +{
33266 + if (phy_info->port_details) {
33267 + phy_info->port_details->rphy = rphy;
33268 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33269 + ioc->name, rphy));
33270 + }
33271 +
33272 + if (rphy) {
33273 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33274 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33275 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33276 + ioc->name, rphy, rphy->dev.release));
33277 + }
33278 +}
33279 +
33280 /* no mutex */
33281 static void
33282 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33283 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33284 return NULL;
33285 }
33286
33287 -static inline void
33288 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33289 -{
33290 - if (phy_info->port_details) {
33291 - phy_info->port_details->rphy = rphy;
33292 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33293 - ioc->name, rphy));
33294 - }
33295 -
33296 - if (rphy) {
33297 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33298 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33299 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33300 - ioc->name, rphy, rphy->dev.release));
33301 - }
33302 -}
33303 -
33304 static inline struct sas_port *
33305 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33306 {
33307 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33308 index 0c3ced7..1fe34ec 100644
33309 --- a/drivers/message/fusion/mptscsih.c
33310 +++ b/drivers/message/fusion/mptscsih.c
33311 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33312
33313 h = shost_priv(SChost);
33314
33315 - if (h) {
33316 - if (h->info_kbuf == NULL)
33317 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33318 - return h->info_kbuf;
33319 - h->info_kbuf[0] = '\0';
33320 + if (!h)
33321 + return NULL;
33322
33323 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33324 - h->info_kbuf[size-1] = '\0';
33325 - }
33326 + if (h->info_kbuf == NULL)
33327 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33328 + return h->info_kbuf;
33329 + h->info_kbuf[0] = '\0';
33330 +
33331 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33332 + h->info_kbuf[size-1] = '\0';
33333
33334 return h->info_kbuf;
33335 }
33336 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33337 index 07dbeaf..5533142 100644
33338 --- a/drivers/message/i2o/i2o_proc.c
33339 +++ b/drivers/message/i2o/i2o_proc.c
33340 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33341 "Array Controller Device"
33342 };
33343
33344 -static char *chtostr(u8 * chars, int n)
33345 -{
33346 - char tmp[256];
33347 - tmp[0] = 0;
33348 - return strncat(tmp, (char *)chars, n);
33349 -}
33350 -
33351 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33352 char *group)
33353 {
33354 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33355
33356 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33357 seq_printf(seq, "%-#8x", ddm_table.module_id);
33358 - seq_printf(seq, "%-29s",
33359 - chtostr(ddm_table.module_name_version, 28));
33360 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33361 seq_printf(seq, "%9d ", ddm_table.data_size);
33362 seq_printf(seq, "%8d", ddm_table.code_size);
33363
33364 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33365
33366 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33367 seq_printf(seq, "%-#8x", dst->module_id);
33368 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33369 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33370 + seq_printf(seq, "%-.28s", dst->module_name_version);
33371 + seq_printf(seq, "%-.8s", dst->date);
33372 seq_printf(seq, "%8d ", dst->module_size);
33373 seq_printf(seq, "%8d ", dst->mpb_size);
33374 seq_printf(seq, "0x%04x", dst->module_flags);
33375 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33376 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33377 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33378 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33379 - seq_printf(seq, "Vendor info : %s\n",
33380 - chtostr((u8 *) (work32 + 2), 16));
33381 - seq_printf(seq, "Product info : %s\n",
33382 - chtostr((u8 *) (work32 + 6), 16));
33383 - seq_printf(seq, "Description : %s\n",
33384 - chtostr((u8 *) (work32 + 10), 16));
33385 - seq_printf(seq, "Product rev. : %s\n",
33386 - chtostr((u8 *) (work32 + 14), 8));
33387 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33388 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33389 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33390 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33391
33392 seq_printf(seq, "Serial number : ");
33393 print_serial_number(seq, (u8 *) (work32 + 16),
33394 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33395 }
33396
33397 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33398 - seq_printf(seq, "Module name : %s\n",
33399 - chtostr(result.module_name, 24));
33400 - seq_printf(seq, "Module revision : %s\n",
33401 - chtostr(result.module_rev, 8));
33402 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
33403 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33404
33405 seq_printf(seq, "Serial number : ");
33406 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33407 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33408 return 0;
33409 }
33410
33411 - seq_printf(seq, "Device name : %s\n",
33412 - chtostr(result.device_name, 64));
33413 - seq_printf(seq, "Service name : %s\n",
33414 - chtostr(result.service_name, 64));
33415 - seq_printf(seq, "Physical name : %s\n",
33416 - chtostr(result.physical_location, 64));
33417 - seq_printf(seq, "Instance number : %s\n",
33418 - chtostr(result.instance_number, 4));
33419 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
33420 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
33421 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33422 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33423
33424 return 0;
33425 }
33426 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33427 index a8c08f3..155fe3d 100644
33428 --- a/drivers/message/i2o/iop.c
33429 +++ b/drivers/message/i2o/iop.c
33430 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33431
33432 spin_lock_irqsave(&c->context_list_lock, flags);
33433
33434 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33435 - atomic_inc(&c->context_list_counter);
33436 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33437 + atomic_inc_unchecked(&c->context_list_counter);
33438
33439 - entry->context = atomic_read(&c->context_list_counter);
33440 + entry->context = atomic_read_unchecked(&c->context_list_counter);
33441
33442 list_add(&entry->list, &c->context_list);
33443
33444 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33445
33446 #if BITS_PER_LONG == 64
33447 spin_lock_init(&c->context_list_lock);
33448 - atomic_set(&c->context_list_counter, 0);
33449 + atomic_set_unchecked(&c->context_list_counter, 0);
33450 INIT_LIST_HEAD(&c->context_list);
33451 #endif
33452
33453 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33454 index 7ce65f4..e66e9bc 100644
33455 --- a/drivers/mfd/abx500-core.c
33456 +++ b/drivers/mfd/abx500-core.c
33457 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33458
33459 struct abx500_device_entry {
33460 struct list_head list;
33461 - struct abx500_ops ops;
33462 + abx500_ops_no_const ops;
33463 struct device *dev;
33464 };
33465
33466 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33467 index 5c2a06a..8fa077c 100644
33468 --- a/drivers/mfd/janz-cmodio.c
33469 +++ b/drivers/mfd/janz-cmodio.c
33470 @@ -13,6 +13,7 @@
33471
33472 #include <linux/kernel.h>
33473 #include <linux/module.h>
33474 +#include <linux/slab.h>
33475 #include <linux/init.h>
33476 #include <linux/pci.h>
33477 #include <linux/interrupt.h>
33478 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
33479 index 29d12a7..f900ba4 100644
33480 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
33481 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
33482 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
33483 * the lid is closed. This leads to interrupts as soon as a little move
33484 * is done.
33485 */
33486 - atomic_inc(&lis3->count);
33487 + atomic_inc_unchecked(&lis3->count);
33488
33489 wake_up_interruptible(&lis3->misc_wait);
33490 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
33491 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33492 if (lis3->pm_dev)
33493 pm_runtime_get_sync(lis3->pm_dev);
33494
33495 - atomic_set(&lis3->count, 0);
33496 + atomic_set_unchecked(&lis3->count, 0);
33497 return 0;
33498 }
33499
33500 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33501 add_wait_queue(&lis3->misc_wait, &wait);
33502 while (true) {
33503 set_current_state(TASK_INTERRUPTIBLE);
33504 - data = atomic_xchg(&lis3->count, 0);
33505 + data = atomic_xchg_unchecked(&lis3->count, 0);
33506 if (data)
33507 break;
33508
33509 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33510 struct lis3lv02d, miscdev);
33511
33512 poll_wait(file, &lis3->misc_wait, wait);
33513 - if (atomic_read(&lis3->count))
33514 + if (atomic_read_unchecked(&lis3->count))
33515 return POLLIN | POLLRDNORM;
33516 return 0;
33517 }
33518 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
33519 index 2b1482a..5d33616 100644
33520 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
33521 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
33522 @@ -266,7 +266,7 @@ struct lis3lv02d {
33523 struct input_polled_dev *idev; /* input device */
33524 struct platform_device *pdev; /* platform device */
33525 struct regulator_bulk_data regulators[2];
33526 - atomic_t count; /* interrupt count after last read */
33527 + atomic_unchecked_t count; /* interrupt count after last read */
33528 union axis_conversion ac; /* hw -> logical axis */
33529 int mapped_btns[3];
33530
33531 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
33532 index 2f30bad..c4c13d0 100644
33533 --- a/drivers/misc/sgi-gru/gruhandles.c
33534 +++ b/drivers/misc/sgi-gru/gruhandles.c
33535 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33536 unsigned long nsec;
33537
33538 nsec = CLKS2NSEC(clks);
33539 - atomic_long_inc(&mcs_op_statistics[op].count);
33540 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
33541 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33542 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
33543 if (mcs_op_statistics[op].max < nsec)
33544 mcs_op_statistics[op].max = nsec;
33545 }
33546 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
33547 index 7768b87..f8aac38 100644
33548 --- a/drivers/misc/sgi-gru/gruprocfs.c
33549 +++ b/drivers/misc/sgi-gru/gruprocfs.c
33550 @@ -32,9 +32,9 @@
33551
33552 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33553
33554 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33555 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33556 {
33557 - unsigned long val = atomic_long_read(v);
33558 + unsigned long val = atomic_long_read_unchecked(v);
33559
33560 seq_printf(s, "%16lu %s\n", val, id);
33561 }
33562 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
33563
33564 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
33565 for (op = 0; op < mcsop_last; op++) {
33566 - count = atomic_long_read(&mcs_op_statistics[op].count);
33567 - total = atomic_long_read(&mcs_op_statistics[op].total);
33568 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33569 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33570 max = mcs_op_statistics[op].max;
33571 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33572 count ? total / count : 0, max);
33573 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
33574 index 5c3ce24..4915ccb 100644
33575 --- a/drivers/misc/sgi-gru/grutables.h
33576 +++ b/drivers/misc/sgi-gru/grutables.h
33577 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
33578 * GRU statistics.
33579 */
33580 struct gru_stats_s {
33581 - atomic_long_t vdata_alloc;
33582 - atomic_long_t vdata_free;
33583 - atomic_long_t gts_alloc;
33584 - atomic_long_t gts_free;
33585 - atomic_long_t gms_alloc;
33586 - atomic_long_t gms_free;
33587 - atomic_long_t gts_double_allocate;
33588 - atomic_long_t assign_context;
33589 - atomic_long_t assign_context_failed;
33590 - atomic_long_t free_context;
33591 - atomic_long_t load_user_context;
33592 - atomic_long_t load_kernel_context;
33593 - atomic_long_t lock_kernel_context;
33594 - atomic_long_t unlock_kernel_context;
33595 - atomic_long_t steal_user_context;
33596 - atomic_long_t steal_kernel_context;
33597 - atomic_long_t steal_context_failed;
33598 - atomic_long_t nopfn;
33599 - atomic_long_t asid_new;
33600 - atomic_long_t asid_next;
33601 - atomic_long_t asid_wrap;
33602 - atomic_long_t asid_reuse;
33603 - atomic_long_t intr;
33604 - atomic_long_t intr_cbr;
33605 - atomic_long_t intr_tfh;
33606 - atomic_long_t intr_spurious;
33607 - atomic_long_t intr_mm_lock_failed;
33608 - atomic_long_t call_os;
33609 - atomic_long_t call_os_wait_queue;
33610 - atomic_long_t user_flush_tlb;
33611 - atomic_long_t user_unload_context;
33612 - atomic_long_t user_exception;
33613 - atomic_long_t set_context_option;
33614 - atomic_long_t check_context_retarget_intr;
33615 - atomic_long_t check_context_unload;
33616 - atomic_long_t tlb_dropin;
33617 - atomic_long_t tlb_preload_page;
33618 - atomic_long_t tlb_dropin_fail_no_asid;
33619 - atomic_long_t tlb_dropin_fail_upm;
33620 - atomic_long_t tlb_dropin_fail_invalid;
33621 - atomic_long_t tlb_dropin_fail_range_active;
33622 - atomic_long_t tlb_dropin_fail_idle;
33623 - atomic_long_t tlb_dropin_fail_fmm;
33624 - atomic_long_t tlb_dropin_fail_no_exception;
33625 - atomic_long_t tfh_stale_on_fault;
33626 - atomic_long_t mmu_invalidate_range;
33627 - atomic_long_t mmu_invalidate_page;
33628 - atomic_long_t flush_tlb;
33629 - atomic_long_t flush_tlb_gru;
33630 - atomic_long_t flush_tlb_gru_tgh;
33631 - atomic_long_t flush_tlb_gru_zero_asid;
33632 + atomic_long_unchecked_t vdata_alloc;
33633 + atomic_long_unchecked_t vdata_free;
33634 + atomic_long_unchecked_t gts_alloc;
33635 + atomic_long_unchecked_t gts_free;
33636 + atomic_long_unchecked_t gms_alloc;
33637 + atomic_long_unchecked_t gms_free;
33638 + atomic_long_unchecked_t gts_double_allocate;
33639 + atomic_long_unchecked_t assign_context;
33640 + atomic_long_unchecked_t assign_context_failed;
33641 + atomic_long_unchecked_t free_context;
33642 + atomic_long_unchecked_t load_user_context;
33643 + atomic_long_unchecked_t load_kernel_context;
33644 + atomic_long_unchecked_t lock_kernel_context;
33645 + atomic_long_unchecked_t unlock_kernel_context;
33646 + atomic_long_unchecked_t steal_user_context;
33647 + atomic_long_unchecked_t steal_kernel_context;
33648 + atomic_long_unchecked_t steal_context_failed;
33649 + atomic_long_unchecked_t nopfn;
33650 + atomic_long_unchecked_t asid_new;
33651 + atomic_long_unchecked_t asid_next;
33652 + atomic_long_unchecked_t asid_wrap;
33653 + atomic_long_unchecked_t asid_reuse;
33654 + atomic_long_unchecked_t intr;
33655 + atomic_long_unchecked_t intr_cbr;
33656 + atomic_long_unchecked_t intr_tfh;
33657 + atomic_long_unchecked_t intr_spurious;
33658 + atomic_long_unchecked_t intr_mm_lock_failed;
33659 + atomic_long_unchecked_t call_os;
33660 + atomic_long_unchecked_t call_os_wait_queue;
33661 + atomic_long_unchecked_t user_flush_tlb;
33662 + atomic_long_unchecked_t user_unload_context;
33663 + atomic_long_unchecked_t user_exception;
33664 + atomic_long_unchecked_t set_context_option;
33665 + atomic_long_unchecked_t check_context_retarget_intr;
33666 + atomic_long_unchecked_t check_context_unload;
33667 + atomic_long_unchecked_t tlb_dropin;
33668 + atomic_long_unchecked_t tlb_preload_page;
33669 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33670 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33671 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33672 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33673 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33674 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33675 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33676 + atomic_long_unchecked_t tfh_stale_on_fault;
33677 + atomic_long_unchecked_t mmu_invalidate_range;
33678 + atomic_long_unchecked_t mmu_invalidate_page;
33679 + atomic_long_unchecked_t flush_tlb;
33680 + atomic_long_unchecked_t flush_tlb_gru;
33681 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33682 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33683
33684 - atomic_long_t copy_gpa;
33685 - atomic_long_t read_gpa;
33686 + atomic_long_unchecked_t copy_gpa;
33687 + atomic_long_unchecked_t read_gpa;
33688
33689 - atomic_long_t mesq_receive;
33690 - atomic_long_t mesq_receive_none;
33691 - atomic_long_t mesq_send;
33692 - atomic_long_t mesq_send_failed;
33693 - atomic_long_t mesq_noop;
33694 - atomic_long_t mesq_send_unexpected_error;
33695 - atomic_long_t mesq_send_lb_overflow;
33696 - atomic_long_t mesq_send_qlimit_reached;
33697 - atomic_long_t mesq_send_amo_nacked;
33698 - atomic_long_t mesq_send_put_nacked;
33699 - atomic_long_t mesq_page_overflow;
33700 - atomic_long_t mesq_qf_locked;
33701 - atomic_long_t mesq_qf_noop_not_full;
33702 - atomic_long_t mesq_qf_switch_head_failed;
33703 - atomic_long_t mesq_qf_unexpected_error;
33704 - atomic_long_t mesq_noop_unexpected_error;
33705 - atomic_long_t mesq_noop_lb_overflow;
33706 - atomic_long_t mesq_noop_qlimit_reached;
33707 - atomic_long_t mesq_noop_amo_nacked;
33708 - atomic_long_t mesq_noop_put_nacked;
33709 - atomic_long_t mesq_noop_page_overflow;
33710 + atomic_long_unchecked_t mesq_receive;
33711 + atomic_long_unchecked_t mesq_receive_none;
33712 + atomic_long_unchecked_t mesq_send;
33713 + atomic_long_unchecked_t mesq_send_failed;
33714 + atomic_long_unchecked_t mesq_noop;
33715 + atomic_long_unchecked_t mesq_send_unexpected_error;
33716 + atomic_long_unchecked_t mesq_send_lb_overflow;
33717 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33718 + atomic_long_unchecked_t mesq_send_amo_nacked;
33719 + atomic_long_unchecked_t mesq_send_put_nacked;
33720 + atomic_long_unchecked_t mesq_page_overflow;
33721 + atomic_long_unchecked_t mesq_qf_locked;
33722 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33723 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33724 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33725 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33726 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33727 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33728 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33729 + atomic_long_unchecked_t mesq_noop_put_nacked;
33730 + atomic_long_unchecked_t mesq_noop_page_overflow;
33731
33732 };
33733
33734 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
33735 tghop_invalidate, mcsop_last};
33736
33737 struct mcs_op_statistic {
33738 - atomic_long_t count;
33739 - atomic_long_t total;
33740 + atomic_long_unchecked_t count;
33741 + atomic_long_unchecked_t total;
33742 unsigned long max;
33743 };
33744
33745 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
33746
33747 #define STAT(id) do { \
33748 if (gru_options & OPT_STATS) \
33749 - atomic_long_inc(&gru_stats.id); \
33750 + atomic_long_inc_unchecked(&gru_stats.id); \
33751 } while (0)
33752
33753 #ifdef CONFIG_SGI_GRU_DEBUG
33754 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
33755 index 851b2f2..a4ec097 100644
33756 --- a/drivers/misc/sgi-xp/xp.h
33757 +++ b/drivers/misc/sgi-xp/xp.h
33758 @@ -289,7 +289,7 @@ struct xpc_interface {
33759 xpc_notify_func, void *);
33760 void (*received) (short, int, void *);
33761 enum xp_retval (*partid_to_nasids) (short, void *);
33762 -};
33763 +} __no_const;
33764
33765 extern struct xpc_interface xpc_interface;
33766
33767 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
33768 index b94d5f7..7f494c5 100644
33769 --- a/drivers/misc/sgi-xp/xpc.h
33770 +++ b/drivers/misc/sgi-xp/xpc.h
33771 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
33772 void (*received_payload) (struct xpc_channel *, void *);
33773 void (*notify_senders_of_disconnect) (struct xpc_channel *);
33774 };
33775 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
33776
33777 /* struct xpc_partition act_state values (for XPC HB) */
33778
33779 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
33780 /* found in xpc_main.c */
33781 extern struct device *xpc_part;
33782 extern struct device *xpc_chan;
33783 -extern struct xpc_arch_operations xpc_arch_ops;
33784 +extern xpc_arch_operations_no_const xpc_arch_ops;
33785 extern int xpc_disengage_timelimit;
33786 extern int xpc_disengage_timedout;
33787 extern int xpc_activate_IRQ_rcvd;
33788 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
33789 index 8d082b4..aa749ae 100644
33790 --- a/drivers/misc/sgi-xp/xpc_main.c
33791 +++ b/drivers/misc/sgi-xp/xpc_main.c
33792 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
33793 .notifier_call = xpc_system_die,
33794 };
33795
33796 -struct xpc_arch_operations xpc_arch_ops;
33797 +xpc_arch_operations_no_const xpc_arch_ops;
33798
33799 /*
33800 * Timer function to enforce the timelimit on the partition disengage.
33801 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
33802 index 6878a94..fe5c5f1 100644
33803 --- a/drivers/mmc/host/sdhci-pci.c
33804 +++ b/drivers/mmc/host/sdhci-pci.c
33805 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
33806 .probe = via_probe,
33807 };
33808
33809 -static const struct pci_device_id pci_ids[] __devinitdata = {
33810 +static const struct pci_device_id pci_ids[] __devinitconst = {
33811 {
33812 .vendor = PCI_VENDOR_ID_RICOH,
33813 .device = PCI_DEVICE_ID_RICOH_R5C822,
33814 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
33815 index e9fad91..0a7a16a 100644
33816 --- a/drivers/mtd/devices/doc2000.c
33817 +++ b/drivers/mtd/devices/doc2000.c
33818 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
33819
33820 /* The ECC will not be calculated correctly if less than 512 is written */
33821 /* DBB-
33822 - if (len != 0x200 && eccbuf)
33823 + if (len != 0x200)
33824 printk(KERN_WARNING
33825 "ECC needs a full sector write (adr: %lx size %lx)\n",
33826 (long) to, (long) len);
33827 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
33828 index a3f7a27..234016e 100644
33829 --- a/drivers/mtd/devices/doc2001.c
33830 +++ b/drivers/mtd/devices/doc2001.c
33831 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
33832 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33833
33834 /* Don't allow read past end of device */
33835 - if (from >= this->totlen)
33836 + if (from >= this->totlen || !len)
33837 return -EINVAL;
33838
33839 /* Don't allow a single read to cross a 512-byte block boundary */
33840 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
33841 index 3984d48..28aa897 100644
33842 --- a/drivers/mtd/nand/denali.c
33843 +++ b/drivers/mtd/nand/denali.c
33844 @@ -26,6 +26,7 @@
33845 #include <linux/pci.h>
33846 #include <linux/mtd/mtd.h>
33847 #include <linux/module.h>
33848 +#include <linux/slab.h>
33849
33850 #include "denali.h"
33851
33852 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
33853 index ac40925..483b753 100644
33854 --- a/drivers/mtd/nftlmount.c
33855 +++ b/drivers/mtd/nftlmount.c
33856 @@ -24,6 +24,7 @@
33857 #include <asm/errno.h>
33858 #include <linux/delay.h>
33859 #include <linux/slab.h>
33860 +#include <linux/sched.h>
33861 #include <linux/mtd/mtd.h>
33862 #include <linux/mtd/nand.h>
33863 #include <linux/mtd/nftl.h>
33864 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
33865 index 6c3fb5a..c542a81 100644
33866 --- a/drivers/mtd/ubi/build.c
33867 +++ b/drivers/mtd/ubi/build.c
33868 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
33869 static int __init bytes_str_to_int(const char *str)
33870 {
33871 char *endp;
33872 - unsigned long result;
33873 + unsigned long result, scale = 1;
33874
33875 result = simple_strtoul(str, &endp, 0);
33876 if (str == endp || result >= INT_MAX) {
33877 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
33878
33879 switch (*endp) {
33880 case 'G':
33881 - result *= 1024;
33882 + scale *= 1024;
33883 case 'M':
33884 - result *= 1024;
33885 + scale *= 1024;
33886 case 'K':
33887 - result *= 1024;
33888 + scale *= 1024;
33889 if (endp[1] == 'i' && endp[2] == 'B')
33890 endp += 2;
33891 case '\0':
33892 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
33893 return -EINVAL;
33894 }
33895
33896 - return result;
33897 + if ((intoverflow_t)result*scale >= INT_MAX) {
33898 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33899 + str);
33900 + return -EINVAL;
33901 + }
33902 +
33903 + return result*scale;
33904 }
33905
33906 /**
33907 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
33908 index 1feae59..c2a61d2 100644
33909 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
33910 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
33911 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
33912 */
33913
33914 #define ATL2_PARAM(X, desc) \
33915 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33916 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33917 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
33918 MODULE_PARM_DESC(X, desc);
33919 #else
33920 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33921 index 9a517c2..a50cfcb 100644
33922 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33923 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33924 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
33925
33926 int (*wait_comp)(struct bnx2x *bp,
33927 struct bnx2x_rx_mode_ramrod_params *p);
33928 -};
33929 +} __no_const;
33930
33931 /********************** Set multicast group ***********************************/
33932
33933 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
33934 index 94b4bd0..73c02de 100644
33935 --- a/drivers/net/ethernet/broadcom/tg3.h
33936 +++ b/drivers/net/ethernet/broadcom/tg3.h
33937 @@ -134,6 +134,7 @@
33938 #define CHIPREV_ID_5750_A0 0x4000
33939 #define CHIPREV_ID_5750_A1 0x4001
33940 #define CHIPREV_ID_5750_A3 0x4003
33941 +#define CHIPREV_ID_5750_C1 0x4201
33942 #define CHIPREV_ID_5750_C2 0x4202
33943 #define CHIPREV_ID_5752_A0_HW 0x5000
33944 #define CHIPREV_ID_5752_A0 0x6000
33945 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33946 index c5f5479..2e8c260 100644
33947 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33948 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33949 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
33950 */
33951 struct l2t_skb_cb {
33952 arp_failure_handler_func arp_failure_handler;
33953 -};
33954 +} __no_const;
33955
33956 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33957
33958 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
33959 index 871bcaa..4043505 100644
33960 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
33961 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
33962 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33963 for (i=0; i<ETH_ALEN; i++) {
33964 tmp.addr[i] = dev->dev_addr[i];
33965 }
33966 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33967 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
33968 break;
33969
33970 case DE4X5_SET_HWADDR: /* Set the hardware address */
33971 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33972 spin_lock_irqsave(&lp->lock, flags);
33973 memcpy(&statbuf, &lp->pktStats, ioc->len);
33974 spin_unlock_irqrestore(&lp->lock, flags);
33975 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
33976 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
33977 return -EFAULT;
33978 break;
33979 }
33980 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
33981 index 14d5b61..1398636 100644
33982 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
33983 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
33984 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
33985 {NULL}};
33986
33987
33988 -static const char *block_name[] __devinitdata = {
33989 +static const char *block_name[] __devinitconst = {
33990 "21140 non-MII",
33991 "21140 MII PHY",
33992 "21142 Serial PHY",
33993 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
33994 index 4d01219..b58d26d 100644
33995 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
33996 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
33997 @@ -236,7 +236,7 @@ struct pci_id_info {
33998 int drv_flags; /* Driver use, intended as capability flags. */
33999 };
34000
34001 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34002 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34003 { /* Sometime a Level-One switch card. */
34004 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34005 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34006 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34007 index dcd7f7a..ecb7fb3 100644
34008 --- a/drivers/net/ethernet/dlink/sundance.c
34009 +++ b/drivers/net/ethernet/dlink/sundance.c
34010 @@ -218,7 +218,7 @@ enum {
34011 struct pci_id_info {
34012 const char *name;
34013 };
34014 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34015 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34016 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34017 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34018 {"D-Link DFE-580TX 4 port Server Adapter"},
34019 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34020 index bf266a0..e024af7 100644
34021 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34022 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34023 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34024
34025 if (wrapped)
34026 newacc += 65536;
34027 - ACCESS_ONCE(*acc) = newacc;
34028 + ACCESS_ONCE_RW(*acc) = newacc;
34029 }
34030
34031 void be_parse_stats(struct be_adapter *adapter)
34032 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34033 index 61d2bdd..7f1154a 100644
34034 --- a/drivers/net/ethernet/fealnx.c
34035 +++ b/drivers/net/ethernet/fealnx.c
34036 @@ -150,7 +150,7 @@ struct chip_info {
34037 int flags;
34038 };
34039
34040 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34041 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34042 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34043 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34044 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34045 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34046 index e1159e5..e18684d 100644
34047 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34048 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34049 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
34050 {
34051 struct e1000_hw *hw = &adapter->hw;
34052 struct e1000_mac_info *mac = &hw->mac;
34053 - struct e1000_mac_operations *func = &mac->ops;
34054 + e1000_mac_operations_no_const *func = &mac->ops;
34055
34056 /* Set media type */
34057 switch (adapter->pdev->device) {
34058 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
34059 index a3e65fd..f451444 100644
34060 --- a/drivers/net/ethernet/intel/e1000e/82571.c
34061 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
34062 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
34063 {
34064 struct e1000_hw *hw = &adapter->hw;
34065 struct e1000_mac_info *mac = &hw->mac;
34066 - struct e1000_mac_operations *func = &mac->ops;
34067 + e1000_mac_operations_no_const *func = &mac->ops;
34068 u32 swsm = 0;
34069 u32 swsm2 = 0;
34070 bool force_clear_smbi = false;
34071 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34072 index 2967039..ca8c40c 100644
34073 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34074 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34075 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
34076 void (*write_vfta)(struct e1000_hw *, u32, u32);
34077 s32 (*read_mac_addr)(struct e1000_hw *);
34078 };
34079 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34080
34081 /*
34082 * When to use various PHY register access functions:
34083 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
34084 void (*power_up)(struct e1000_hw *);
34085 void (*power_down)(struct e1000_hw *);
34086 };
34087 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34088
34089 /* Function pointers for the NVM. */
34090 struct e1000_nvm_operations {
34091 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
34092 s32 (*validate)(struct e1000_hw *);
34093 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34094 };
34095 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34096
34097 struct e1000_mac_info {
34098 - struct e1000_mac_operations ops;
34099 + e1000_mac_operations_no_const ops;
34100 u8 addr[ETH_ALEN];
34101 u8 perm_addr[ETH_ALEN];
34102
34103 @@ -872,7 +875,7 @@ struct e1000_mac_info {
34104 };
34105
34106 struct e1000_phy_info {
34107 - struct e1000_phy_operations ops;
34108 + e1000_phy_operations_no_const ops;
34109
34110 enum e1000_phy_type type;
34111
34112 @@ -906,7 +909,7 @@ struct e1000_phy_info {
34113 };
34114
34115 struct e1000_nvm_info {
34116 - struct e1000_nvm_operations ops;
34117 + e1000_nvm_operations_no_const ops;
34118
34119 enum e1000_nvm_type type;
34120 enum e1000_nvm_override override;
34121 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34122 index 4519a13..f97fcd0 100644
34123 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34124 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34125 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34126 s32 (*read_mac_addr)(struct e1000_hw *);
34127 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34128 };
34129 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34130
34131 struct e1000_phy_operations {
34132 s32 (*acquire)(struct e1000_hw *);
34133 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34134 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34135 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34136 };
34137 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34138
34139 struct e1000_nvm_operations {
34140 s32 (*acquire)(struct e1000_hw *);
34141 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34142 s32 (*update)(struct e1000_hw *);
34143 s32 (*validate)(struct e1000_hw *);
34144 };
34145 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34146
34147 struct e1000_info {
34148 s32 (*get_invariants)(struct e1000_hw *);
34149 @@ -350,7 +353,7 @@ struct e1000_info {
34150 extern const struct e1000_info e1000_82575_info;
34151
34152 struct e1000_mac_info {
34153 - struct e1000_mac_operations ops;
34154 + e1000_mac_operations_no_const ops;
34155
34156 u8 addr[6];
34157 u8 perm_addr[6];
34158 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34159 };
34160
34161 struct e1000_phy_info {
34162 - struct e1000_phy_operations ops;
34163 + e1000_phy_operations_no_const ops;
34164
34165 enum e1000_phy_type type;
34166
34167 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34168 };
34169
34170 struct e1000_nvm_info {
34171 - struct e1000_nvm_operations ops;
34172 + e1000_nvm_operations_no_const ops;
34173 enum e1000_nvm_type type;
34174 enum e1000_nvm_override override;
34175
34176 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34177 s32 (*check_for_ack)(struct e1000_hw *, u16);
34178 s32 (*check_for_rst)(struct e1000_hw *, u16);
34179 };
34180 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34181
34182 struct e1000_mbx_stats {
34183 u32 msgs_tx;
34184 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34185 };
34186
34187 struct e1000_mbx_info {
34188 - struct e1000_mbx_operations ops;
34189 + e1000_mbx_operations_no_const ops;
34190 struct e1000_mbx_stats stats;
34191 u32 timeout;
34192 u32 usec_delay;
34193 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34194 index d7ed58f..64cde36 100644
34195 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34196 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34197 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34198 s32 (*read_mac_addr)(struct e1000_hw *);
34199 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34200 };
34201 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34202
34203 struct e1000_mac_info {
34204 - struct e1000_mac_operations ops;
34205 + e1000_mac_operations_no_const ops;
34206 u8 addr[6];
34207 u8 perm_addr[6];
34208
34209 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34210 s32 (*check_for_ack)(struct e1000_hw *);
34211 s32 (*check_for_rst)(struct e1000_hw *);
34212 };
34213 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34214
34215 struct e1000_mbx_stats {
34216 u32 msgs_tx;
34217 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34218 };
34219
34220 struct e1000_mbx_info {
34221 - struct e1000_mbx_operations ops;
34222 + e1000_mbx_operations_no_const ops;
34223 struct e1000_mbx_stats stats;
34224 u32 timeout;
34225 u32 usec_delay;
34226 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34227 index 6c5cca8..de8ef63 100644
34228 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34229 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34230 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
34231 s32 (*update_checksum)(struct ixgbe_hw *);
34232 u16 (*calc_checksum)(struct ixgbe_hw *);
34233 };
34234 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34235
34236 struct ixgbe_mac_operations {
34237 s32 (*init_hw)(struct ixgbe_hw *);
34238 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
34239 /* Manageability interface */
34240 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34241 };
34242 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34243
34244 struct ixgbe_phy_operations {
34245 s32 (*identify)(struct ixgbe_hw *);
34246 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
34247 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34248 s32 (*check_overtemp)(struct ixgbe_hw *);
34249 };
34250 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34251
34252 struct ixgbe_eeprom_info {
34253 - struct ixgbe_eeprom_operations ops;
34254 + ixgbe_eeprom_operations_no_const ops;
34255 enum ixgbe_eeprom_type type;
34256 u32 semaphore_delay;
34257 u16 word_size;
34258 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
34259
34260 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34261 struct ixgbe_mac_info {
34262 - struct ixgbe_mac_operations ops;
34263 + ixgbe_mac_operations_no_const ops;
34264 enum ixgbe_mac_type type;
34265 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
34266 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
34267 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
34268 };
34269
34270 struct ixgbe_phy_info {
34271 - struct ixgbe_phy_operations ops;
34272 + ixgbe_phy_operations_no_const ops;
34273 struct mdio_if_info mdio;
34274 enum ixgbe_phy_type type;
34275 u32 id;
34276 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
34277 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34278 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34279 };
34280 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34281
34282 struct ixgbe_mbx_stats {
34283 u32 msgs_tx;
34284 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
34285 };
34286
34287 struct ixgbe_mbx_info {
34288 - struct ixgbe_mbx_operations ops;
34289 + ixgbe_mbx_operations_no_const ops;
34290 struct ixgbe_mbx_stats stats;
34291 u32 timeout;
34292 u32 usec_delay;
34293 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34294 index 10306b4..28df758 100644
34295 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34296 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34297 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34298 s32 (*clear_vfta)(struct ixgbe_hw *);
34299 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34300 };
34301 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34302
34303 enum ixgbe_mac_type {
34304 ixgbe_mac_unknown = 0,
34305 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34306 };
34307
34308 struct ixgbe_mac_info {
34309 - struct ixgbe_mac_operations ops;
34310 + ixgbe_mac_operations_no_const ops;
34311 u8 addr[6];
34312 u8 perm_addr[6];
34313
34314 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34315 s32 (*check_for_ack)(struct ixgbe_hw *);
34316 s32 (*check_for_rst)(struct ixgbe_hw *);
34317 };
34318 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34319
34320 struct ixgbe_mbx_stats {
34321 u32 msgs_tx;
34322 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34323 };
34324
34325 struct ixgbe_mbx_info {
34326 - struct ixgbe_mbx_operations ops;
34327 + ixgbe_mbx_operations_no_const ops;
34328 struct ixgbe_mbx_stats stats;
34329 u32 timeout;
34330 u32 udelay;
34331 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34332 index 94bbc85..78c12e6 100644
34333 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
34334 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34335 @@ -40,6 +40,7 @@
34336 #include <linux/dma-mapping.h>
34337 #include <linux/slab.h>
34338 #include <linux/io-mapping.h>
34339 +#include <linux/sched.h>
34340
34341 #include <linux/mlx4/device.h>
34342 #include <linux/mlx4/doorbell.h>
34343 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34344 index 5046a64..71ca936 100644
34345 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34346 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34347 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34348 void (*link_down)(struct __vxge_hw_device *devh);
34349 void (*crit_err)(struct __vxge_hw_device *devh,
34350 enum vxge_hw_event type, u64 ext_data);
34351 -};
34352 +} __no_const;
34353
34354 /*
34355 * struct __vxge_hw_blockpool_entry - Block private data structure
34356 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34357 index 4a518a3..936b334 100644
34358 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34359 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34360 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34361 struct vxge_hw_mempool_dma *dma_object,
34362 u32 index,
34363 u32 is_last);
34364 -};
34365 +} __no_const;
34366
34367 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34368 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34369 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34370 index c8f47f1..5da9840 100644
34371 --- a/drivers/net/ethernet/realtek/r8169.c
34372 +++ b/drivers/net/ethernet/realtek/r8169.c
34373 @@ -698,17 +698,17 @@ struct rtl8169_private {
34374 struct mdio_ops {
34375 void (*write)(void __iomem *, int, int);
34376 int (*read)(void __iomem *, int);
34377 - } mdio_ops;
34378 + } __no_const mdio_ops;
34379
34380 struct pll_power_ops {
34381 void (*down)(struct rtl8169_private *);
34382 void (*up)(struct rtl8169_private *);
34383 - } pll_power_ops;
34384 + } __no_const pll_power_ops;
34385
34386 struct jumbo_ops {
34387 void (*enable)(struct rtl8169_private *);
34388 void (*disable)(struct rtl8169_private *);
34389 - } jumbo_ops;
34390 + } __no_const jumbo_ops;
34391
34392 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34393 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34394 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34395 index 1b4658c..a30dabb 100644
34396 --- a/drivers/net/ethernet/sis/sis190.c
34397 +++ b/drivers/net/ethernet/sis/sis190.c
34398 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34399 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34400 struct net_device *dev)
34401 {
34402 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34403 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34404 struct sis190_private *tp = netdev_priv(dev);
34405 struct pci_dev *isa_bridge;
34406 u8 reg, tmp8;
34407 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34408 index edfa15d..002bfa9 100644
34409 --- a/drivers/net/ppp/ppp_generic.c
34410 +++ b/drivers/net/ppp/ppp_generic.c
34411 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34412 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34413 struct ppp_stats stats;
34414 struct ppp_comp_stats cstats;
34415 - char *vers;
34416
34417 switch (cmd) {
34418 case SIOCGPPPSTATS:
34419 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34420 break;
34421
34422 case SIOCGPPPVER:
34423 - vers = PPP_VERSION;
34424 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34425 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34426 break;
34427 err = 0;
34428 break;
34429 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34430 index 515f122..41dd273 100644
34431 --- a/drivers/net/tokenring/abyss.c
34432 +++ b/drivers/net/tokenring/abyss.c
34433 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
34434
34435 static int __init abyss_init (void)
34436 {
34437 - abyss_netdev_ops = tms380tr_netdev_ops;
34438 + pax_open_kernel();
34439 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34440
34441 - abyss_netdev_ops.ndo_open = abyss_open;
34442 - abyss_netdev_ops.ndo_stop = abyss_close;
34443 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34444 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34445 + pax_close_kernel();
34446
34447 return pci_register_driver(&abyss_driver);
34448 }
34449 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34450 index 6153cfd..cf69c1c 100644
34451 --- a/drivers/net/tokenring/madgemc.c
34452 +++ b/drivers/net/tokenring/madgemc.c
34453 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
34454
34455 static int __init madgemc_init (void)
34456 {
34457 - madgemc_netdev_ops = tms380tr_netdev_ops;
34458 - madgemc_netdev_ops.ndo_open = madgemc_open;
34459 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34460 + pax_open_kernel();
34461 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34462 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34463 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34464 + pax_close_kernel();
34465
34466 return mca_register_driver (&madgemc_driver);
34467 }
34468 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34469 index 8d362e6..f91cc52 100644
34470 --- a/drivers/net/tokenring/proteon.c
34471 +++ b/drivers/net/tokenring/proteon.c
34472 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34473 struct platform_device *pdev;
34474 int i, num = 0, err = 0;
34475
34476 - proteon_netdev_ops = tms380tr_netdev_ops;
34477 - proteon_netdev_ops.ndo_open = proteon_open;
34478 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34479 + pax_open_kernel();
34480 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34481 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34482 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34483 + pax_close_kernel();
34484
34485 err = platform_driver_register(&proteon_driver);
34486 if (err)
34487 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34488 index 46db5c5..37c1536 100644
34489 --- a/drivers/net/tokenring/skisa.c
34490 +++ b/drivers/net/tokenring/skisa.c
34491 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34492 struct platform_device *pdev;
34493 int i, num = 0, err = 0;
34494
34495 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34496 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34497 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34498 + pax_open_kernel();
34499 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34500 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34501 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34502 + pax_close_kernel();
34503
34504 err = platform_driver_register(&sk_isa_driver);
34505 if (err)
34506 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34507 index 304fe78..db112fa 100644
34508 --- a/drivers/net/usb/hso.c
34509 +++ b/drivers/net/usb/hso.c
34510 @@ -71,7 +71,7 @@
34511 #include <asm/byteorder.h>
34512 #include <linux/serial_core.h>
34513 #include <linux/serial.h>
34514 -
34515 +#include <asm/local.h>
34516
34517 #define MOD_AUTHOR "Option Wireless"
34518 #define MOD_DESCRIPTION "USB High Speed Option driver"
34519 @@ -257,7 +257,7 @@ struct hso_serial {
34520
34521 /* from usb_serial_port */
34522 struct tty_struct *tty;
34523 - int open_count;
34524 + local_t open_count;
34525 spinlock_t serial_lock;
34526
34527 int (*write_data) (struct hso_serial *serial);
34528 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34529 struct urb *urb;
34530
34531 urb = serial->rx_urb[0];
34532 - if (serial->open_count > 0) {
34533 + if (local_read(&serial->open_count) > 0) {
34534 count = put_rxbuf_data(urb, serial);
34535 if (count == -1)
34536 return;
34537 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34538 DUMP1(urb->transfer_buffer, urb->actual_length);
34539
34540 /* Anyone listening? */
34541 - if (serial->open_count == 0)
34542 + if (local_read(&serial->open_count) == 0)
34543 return;
34544
34545 if (status == 0) {
34546 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34547 spin_unlock_irq(&serial->serial_lock);
34548
34549 /* check for port already opened, if not set the termios */
34550 - serial->open_count++;
34551 - if (serial->open_count == 1) {
34552 + if (local_inc_return(&serial->open_count) == 1) {
34553 serial->rx_state = RX_IDLE;
34554 /* Force default termio settings */
34555 _hso_serial_set_termios(tty, NULL);
34556 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34557 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34558 if (result) {
34559 hso_stop_serial_device(serial->parent);
34560 - serial->open_count--;
34561 + local_dec(&serial->open_count);
34562 kref_put(&serial->parent->ref, hso_serial_ref_free);
34563 }
34564 } else {
34565 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34566
34567 /* reset the rts and dtr */
34568 /* do the actual close */
34569 - serial->open_count--;
34570 + local_dec(&serial->open_count);
34571
34572 - if (serial->open_count <= 0) {
34573 - serial->open_count = 0;
34574 + if (local_read(&serial->open_count) <= 0) {
34575 + local_set(&serial->open_count, 0);
34576 spin_lock_irq(&serial->serial_lock);
34577 if (serial->tty == tty) {
34578 serial->tty->driver_data = NULL;
34579 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34580
34581 /* the actual setup */
34582 spin_lock_irqsave(&serial->serial_lock, flags);
34583 - if (serial->open_count)
34584 + if (local_read(&serial->open_count))
34585 _hso_serial_set_termios(tty, old);
34586 else
34587 tty->termios = old;
34588 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34589 D1("Pending read interrupt on port %d\n", i);
34590 spin_lock(&serial->serial_lock);
34591 if (serial->rx_state == RX_IDLE &&
34592 - serial->open_count > 0) {
34593 + local_read(&serial->open_count) > 0) {
34594 /* Setup and send a ctrl req read on
34595 * port i */
34596 if (!serial->rx_urb_filled[0]) {
34597 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34598 /* Start all serial ports */
34599 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34600 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34601 - if (dev2ser(serial_table[i])->open_count) {
34602 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34603 result =
34604 hso_start_serial_device(serial_table[i], GFP_NOIO);
34605 hso_kick_transmit(dev2ser(serial_table[i]));
34606 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34607 index e662cbc..8d4a102 100644
34608 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
34609 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34610 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
34611 * Return with error code if any of the queue indices
34612 * is out of range
34613 */
34614 - if (p->ring_index[i] < 0 ||
34615 - p->ring_index[i] >= adapter->num_rx_queues)
34616 + if (p->ring_index[i] >= adapter->num_rx_queues)
34617 return -EINVAL;
34618 }
34619
34620 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34621 index 0f9ee46..e2d6e65 100644
34622 --- a/drivers/net/wireless/ath/ath.h
34623 +++ b/drivers/net/wireless/ath/ath.h
34624 @@ -119,6 +119,7 @@ struct ath_ops {
34625 void (*write_flush) (void *);
34626 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34627 };
34628 +typedef struct ath_ops __no_const ath_ops_no_const;
34629
34630 struct ath_common;
34631 struct ath_bus_ops;
34632 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34633 index b592016..fe47870 100644
34634 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34635 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34636 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34637 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
34638 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
34639
34640 - ACCESS_ONCE(ads->ds_link) = i->link;
34641 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
34642 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
34643 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
34644
34645 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
34646 ctl6 = SM(i->keytype, AR_EncrType);
34647 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34648
34649 if ((i->is_first || i->is_last) &&
34650 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
34651 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
34652 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
34653 | set11nTries(i->rates, 1)
34654 | set11nTries(i->rates, 2)
34655 | set11nTries(i->rates, 3)
34656 | (i->dur_update ? AR_DurUpdateEna : 0)
34657 | SM(0, AR_BurstDur);
34658
34659 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
34660 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
34661 | set11nRate(i->rates, 1)
34662 | set11nRate(i->rates, 2)
34663 | set11nRate(i->rates, 3);
34664 } else {
34665 - ACCESS_ONCE(ads->ds_ctl2) = 0;
34666 - ACCESS_ONCE(ads->ds_ctl3) = 0;
34667 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
34668 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
34669 }
34670
34671 if (!i->is_first) {
34672 - ACCESS_ONCE(ads->ds_ctl0) = 0;
34673 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34674 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34675 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
34676 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34677 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34678 return;
34679 }
34680
34681 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34682 break;
34683 }
34684
34685 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34686 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34687 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34688 | SM(i->txpower, AR_XmitPower)
34689 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34690 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34691 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
34692 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
34693
34694 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34695 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34696 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34697 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34698
34699 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
34700 return;
34701
34702 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34703 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34704 | set11nPktDurRTSCTS(i->rates, 1);
34705
34706 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34707 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34708 | set11nPktDurRTSCTS(i->rates, 3);
34709
34710 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34711 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34712 | set11nRateFlags(i->rates, 1)
34713 | set11nRateFlags(i->rates, 2)
34714 | set11nRateFlags(i->rates, 3)
34715 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34716 index f5ae3c6..7936af3 100644
34717 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34718 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34719 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34720 (i->qcu << AR_TxQcuNum_S) | 0x17;
34721
34722 checksum += val;
34723 - ACCESS_ONCE(ads->info) = val;
34724 + ACCESS_ONCE_RW(ads->info) = val;
34725
34726 checksum += i->link;
34727 - ACCESS_ONCE(ads->link) = i->link;
34728 + ACCESS_ONCE_RW(ads->link) = i->link;
34729
34730 checksum += i->buf_addr[0];
34731 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
34732 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
34733 checksum += i->buf_addr[1];
34734 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
34735 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
34736 checksum += i->buf_addr[2];
34737 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
34738 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
34739 checksum += i->buf_addr[3];
34740 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
34741 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
34742
34743 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
34744 - ACCESS_ONCE(ads->ctl3) = val;
34745 + ACCESS_ONCE_RW(ads->ctl3) = val;
34746 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
34747 - ACCESS_ONCE(ads->ctl5) = val;
34748 + ACCESS_ONCE_RW(ads->ctl5) = val;
34749 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
34750 - ACCESS_ONCE(ads->ctl7) = val;
34751 + ACCESS_ONCE_RW(ads->ctl7) = val;
34752 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
34753 - ACCESS_ONCE(ads->ctl9) = val;
34754 + ACCESS_ONCE_RW(ads->ctl9) = val;
34755
34756 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
34757 - ACCESS_ONCE(ads->ctl10) = checksum;
34758 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
34759
34760 if (i->is_first || i->is_last) {
34761 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
34762 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
34763 | set11nTries(i->rates, 1)
34764 | set11nTries(i->rates, 2)
34765 | set11nTries(i->rates, 3)
34766 | (i->dur_update ? AR_DurUpdateEna : 0)
34767 | SM(0, AR_BurstDur);
34768
34769 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
34770 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
34771 | set11nRate(i->rates, 1)
34772 | set11nRate(i->rates, 2)
34773 | set11nRate(i->rates, 3);
34774 } else {
34775 - ACCESS_ONCE(ads->ctl13) = 0;
34776 - ACCESS_ONCE(ads->ctl14) = 0;
34777 + ACCESS_ONCE_RW(ads->ctl13) = 0;
34778 + ACCESS_ONCE_RW(ads->ctl14) = 0;
34779 }
34780
34781 ads->ctl20 = 0;
34782 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34783
34784 ctl17 = SM(i->keytype, AR_EncrType);
34785 if (!i->is_first) {
34786 - ACCESS_ONCE(ads->ctl11) = 0;
34787 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34788 - ACCESS_ONCE(ads->ctl15) = 0;
34789 - ACCESS_ONCE(ads->ctl16) = 0;
34790 - ACCESS_ONCE(ads->ctl17) = ctl17;
34791 - ACCESS_ONCE(ads->ctl18) = 0;
34792 - ACCESS_ONCE(ads->ctl19) = 0;
34793 + ACCESS_ONCE_RW(ads->ctl11) = 0;
34794 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34795 + ACCESS_ONCE_RW(ads->ctl15) = 0;
34796 + ACCESS_ONCE_RW(ads->ctl16) = 0;
34797 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34798 + ACCESS_ONCE_RW(ads->ctl18) = 0;
34799 + ACCESS_ONCE_RW(ads->ctl19) = 0;
34800 return;
34801 }
34802
34803 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34804 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34805 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34806 | SM(i->txpower, AR_XmitPower)
34807 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34808 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34809 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
34810 ctl12 |= SM(val, AR_PAPRDChainMask);
34811
34812 - ACCESS_ONCE(ads->ctl12) = ctl12;
34813 - ACCESS_ONCE(ads->ctl17) = ctl17;
34814 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
34815 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34816
34817 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34818 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34819 | set11nPktDurRTSCTS(i->rates, 1);
34820
34821 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34822 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34823 | set11nPktDurRTSCTS(i->rates, 3);
34824
34825 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
34826 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
34827 | set11nRateFlags(i->rates, 1)
34828 | set11nRateFlags(i->rates, 2)
34829 | set11nRateFlags(i->rates, 3)
34830 | SM(i->rtscts_rate, AR_RTSCTSRate);
34831
34832 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
34833 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
34834 }
34835
34836 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
34837 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34838 index 1bd8edf..10c6d30 100644
34839 --- a/drivers/net/wireless/ath/ath9k/hw.h
34840 +++ b/drivers/net/wireless/ath/ath9k/hw.h
34841 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
34842
34843 /* ANI */
34844 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34845 -};
34846 +} __no_const;
34847
34848 /**
34849 * struct ath_hw_ops - callbacks used by hardware code and driver code
34850 @@ -635,7 +635,7 @@ struct ath_hw_ops {
34851 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34852 struct ath_hw_antcomb_conf *antconf);
34853
34854 -};
34855 +} __no_const;
34856
34857 struct ath_nf_limits {
34858 s16 max;
34859 @@ -655,7 +655,7 @@ enum ath_cal_list {
34860 #define AH_FASTCC 0x4
34861
34862 struct ath_hw {
34863 - struct ath_ops reg_ops;
34864 + ath_ops_no_const reg_ops;
34865
34866 struct ieee80211_hw *hw;
34867 struct ath_common common;
34868 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34869 index bea8524..c677c06 100644
34870 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34871 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34872 @@ -547,7 +547,7 @@ struct phy_func_ptr {
34873 void (*carrsuppr)(struct brcms_phy *);
34874 s32 (*rxsigpwr)(struct brcms_phy *, s32);
34875 void (*detach)(struct brcms_phy *);
34876 -};
34877 +} __no_const;
34878
34879 struct brcms_phy {
34880 struct brcms_phy_pub pubpi_ro;
34881 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34882 index 05f2ad1..ae00eea 100644
34883 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
34884 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34885 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
34886 */
34887 if (iwl3945_mod_params.disable_hw_scan) {
34888 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
34889 - iwl3945_hw_ops.hw_scan = NULL;
34890 + pax_open_kernel();
34891 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
34892 + pax_close_kernel();
34893 }
34894
34895 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
34896 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34897 index 69a77e2..552b42c 100644
34898 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34899 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34900 @@ -71,8 +71,8 @@ do { \
34901 } while (0)
34902
34903 #else
34904 -#define IWL_DEBUG(m, level, fmt, args...)
34905 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
34906 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
34907 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
34908 #define iwl_print_hex_dump(m, level, p, len)
34909 #endif /* CONFIG_IWLWIFI_DEBUG */
34910
34911 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34912 index 523ad55..f8c5dc5 100644
34913 --- a/drivers/net/wireless/mac80211_hwsim.c
34914 +++ b/drivers/net/wireless/mac80211_hwsim.c
34915 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
34916 return -EINVAL;
34917
34918 if (fake_hw_scan) {
34919 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34920 - mac80211_hwsim_ops.sw_scan_start = NULL;
34921 - mac80211_hwsim_ops.sw_scan_complete = NULL;
34922 + pax_open_kernel();
34923 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34924 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
34925 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
34926 + pax_close_kernel();
34927 }
34928
34929 spin_lock_init(&hwsim_radio_lock);
34930 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
34931 index 30f138b..c904585 100644
34932 --- a/drivers/net/wireless/mwifiex/main.h
34933 +++ b/drivers/net/wireless/mwifiex/main.h
34934 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
34935 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
34936 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
34937 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
34938 -};
34939 +} __no_const;
34940
34941 struct mwifiex_adapter {
34942 u8 iface_type;
34943 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
34944 index 0c13840..a5c3ed6 100644
34945 --- a/drivers/net/wireless/rndis_wlan.c
34946 +++ b/drivers/net/wireless/rndis_wlan.c
34947 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
34948
34949 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
34950
34951 - if (rts_threshold < 0 || rts_threshold > 2347)
34952 + if (rts_threshold > 2347)
34953 rts_threshold = 2347;
34954
34955 tmp = cpu_to_le32(rts_threshold);
34956 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
34957 index a77f1bb..c608b2b 100644
34958 --- a/drivers/net/wireless/wl1251/wl1251.h
34959 +++ b/drivers/net/wireless/wl1251/wl1251.h
34960 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
34961 void (*reset)(struct wl1251 *wl);
34962 void (*enable_irq)(struct wl1251 *wl);
34963 void (*disable_irq)(struct wl1251 *wl);
34964 -};
34965 +} __no_const;
34966
34967 struct wl1251 {
34968 struct ieee80211_hw *hw;
34969 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
34970 index f34b5b2..b5abb9f 100644
34971 --- a/drivers/oprofile/buffer_sync.c
34972 +++ b/drivers/oprofile/buffer_sync.c
34973 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
34974 if (cookie == NO_COOKIE)
34975 offset = pc;
34976 if (cookie == INVALID_COOKIE) {
34977 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34978 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34979 offset = pc;
34980 }
34981 if (cookie != last_cookie) {
34982 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
34983 /* add userspace sample */
34984
34985 if (!mm) {
34986 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
34987 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
34988 return 0;
34989 }
34990
34991 cookie = lookup_dcookie(mm, s->eip, &offset);
34992
34993 if (cookie == INVALID_COOKIE) {
34994 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34995 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34996 return 0;
34997 }
34998
34999 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35000 /* ignore backtraces if failed to add a sample */
35001 if (state == sb_bt_start) {
35002 state = sb_bt_ignore;
35003 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35004 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35005 }
35006 }
35007 release_mm(mm);
35008 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35009 index c0cc4e7..44d4e54 100644
35010 --- a/drivers/oprofile/event_buffer.c
35011 +++ b/drivers/oprofile/event_buffer.c
35012 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35013 }
35014
35015 if (buffer_pos == buffer_size) {
35016 - atomic_inc(&oprofile_stats.event_lost_overflow);
35017 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35018 return;
35019 }
35020
35021 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35022 index f8c752e..28bf4fc 100644
35023 --- a/drivers/oprofile/oprof.c
35024 +++ b/drivers/oprofile/oprof.c
35025 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35026 if (oprofile_ops.switch_events())
35027 return;
35028
35029 - atomic_inc(&oprofile_stats.multiplex_counter);
35030 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35031 start_switch_worker();
35032 }
35033
35034 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35035 index 917d28e..d62d981 100644
35036 --- a/drivers/oprofile/oprofile_stats.c
35037 +++ b/drivers/oprofile/oprofile_stats.c
35038 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35039 cpu_buf->sample_invalid_eip = 0;
35040 }
35041
35042 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35043 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35044 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35045 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35046 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35047 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35048 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35049 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35050 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35051 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35052 }
35053
35054
35055 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35056 index 38b6fc0..b5cbfce 100644
35057 --- a/drivers/oprofile/oprofile_stats.h
35058 +++ b/drivers/oprofile/oprofile_stats.h
35059 @@ -13,11 +13,11 @@
35060 #include <linux/atomic.h>
35061
35062 struct oprofile_stat_struct {
35063 - atomic_t sample_lost_no_mm;
35064 - atomic_t sample_lost_no_mapping;
35065 - atomic_t bt_lost_no_mapping;
35066 - atomic_t event_lost_overflow;
35067 - atomic_t multiplex_counter;
35068 + atomic_unchecked_t sample_lost_no_mm;
35069 + atomic_unchecked_t sample_lost_no_mapping;
35070 + atomic_unchecked_t bt_lost_no_mapping;
35071 + atomic_unchecked_t event_lost_overflow;
35072 + atomic_unchecked_t multiplex_counter;
35073 };
35074
35075 extern struct oprofile_stat_struct oprofile_stats;
35076 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35077 index 2f0aa0f..90fab02 100644
35078 --- a/drivers/oprofile/oprofilefs.c
35079 +++ b/drivers/oprofile/oprofilefs.c
35080 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
35081
35082
35083 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35084 - char const *name, atomic_t *val)
35085 + char const *name, atomic_unchecked_t *val)
35086 {
35087 return __oprofilefs_create_file(sb, root, name,
35088 &atomic_ro_fops, 0444, val);
35089 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35090 index 3f56bc0..707d642 100644
35091 --- a/drivers/parport/procfs.c
35092 +++ b/drivers/parport/procfs.c
35093 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35094
35095 *ppos += len;
35096
35097 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35098 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35099 }
35100
35101 #ifdef CONFIG_PARPORT_1284
35102 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35103
35104 *ppos += len;
35105
35106 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35107 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35108 }
35109 #endif /* IEEE1284.3 support. */
35110
35111 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35112 index 9fff878..ad0ad53 100644
35113 --- a/drivers/pci/hotplug/cpci_hotplug.h
35114 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35115 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35116 int (*hardware_test) (struct slot* slot, u32 value);
35117 u8 (*get_power) (struct slot* slot);
35118 int (*set_power) (struct slot* slot, int value);
35119 -};
35120 +} __no_const;
35121
35122 struct cpci_hp_controller {
35123 unsigned int irq;
35124 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35125 index 76ba8a1..20ca857 100644
35126 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35127 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35128 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35129
35130 void compaq_nvram_init (void __iomem *rom_start)
35131 {
35132 +
35133 +#ifndef CONFIG_PAX_KERNEXEC
35134 if (rom_start) {
35135 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35136 }
35137 +#endif
35138 +
35139 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35140
35141 /* initialize our int15 lock */
35142 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35143 index 1cfbf22..be96487 100644
35144 --- a/drivers/pci/pcie/aspm.c
35145 +++ b/drivers/pci/pcie/aspm.c
35146 @@ -27,9 +27,9 @@
35147 #define MODULE_PARAM_PREFIX "pcie_aspm."
35148
35149 /* Note: those are not register definitions */
35150 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35151 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35152 -#define ASPM_STATE_L1 (4) /* L1 state */
35153 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35154 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35155 +#define ASPM_STATE_L1 (4U) /* L1 state */
35156 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35157 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35158
35159 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35160 index dfee1b3..a454fb6 100644
35161 --- a/drivers/pci/probe.c
35162 +++ b/drivers/pci/probe.c
35163 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35164 u32 l, sz, mask;
35165 u16 orig_cmd;
35166
35167 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35168 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35169
35170 if (!dev->mmio_always_on) {
35171 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35172 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35173 index 27911b5..5b6db88 100644
35174 --- a/drivers/pci/proc.c
35175 +++ b/drivers/pci/proc.c
35176 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35177 static int __init pci_proc_init(void)
35178 {
35179 struct pci_dev *dev = NULL;
35180 +
35181 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35182 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35183 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35184 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35185 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35186 +#endif
35187 +#else
35188 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35189 +#endif
35190 proc_create("devices", 0, proc_bus_pci_dir,
35191 &proc_bus_pci_dev_operations);
35192 proc_initialized = 1;
35193 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35194 index 7b82868..b9344c9 100644
35195 --- a/drivers/platform/x86/thinkpad_acpi.c
35196 +++ b/drivers/platform/x86/thinkpad_acpi.c
35197 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35198 return 0;
35199 }
35200
35201 -void static hotkey_mask_warn_incomplete_mask(void)
35202 +static void hotkey_mask_warn_incomplete_mask(void)
35203 {
35204 /* log only what the user can fix... */
35205 const u32 wantedmask = hotkey_driver_mask &
35206 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35207 }
35208 }
35209
35210 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35211 - struct tp_nvram_state *newn,
35212 - const u32 event_mask)
35213 -{
35214 -
35215 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35216 do { \
35217 if ((event_mask & (1 << __scancode)) && \
35218 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35219 tpacpi_hotkey_send_key(__scancode); \
35220 } while (0)
35221
35222 - void issue_volchange(const unsigned int oldvol,
35223 - const unsigned int newvol)
35224 - {
35225 - unsigned int i = oldvol;
35226 +static void issue_volchange(const unsigned int oldvol,
35227 + const unsigned int newvol,
35228 + const u32 event_mask)
35229 +{
35230 + unsigned int i = oldvol;
35231
35232 - while (i > newvol) {
35233 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35234 - i--;
35235 - }
35236 - while (i < newvol) {
35237 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35238 - i++;
35239 - }
35240 + while (i > newvol) {
35241 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35242 + i--;
35243 }
35244 + while (i < newvol) {
35245 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35246 + i++;
35247 + }
35248 +}
35249
35250 - void issue_brightnesschange(const unsigned int oldbrt,
35251 - const unsigned int newbrt)
35252 - {
35253 - unsigned int i = oldbrt;
35254 +static void issue_brightnesschange(const unsigned int oldbrt,
35255 + const unsigned int newbrt,
35256 + const u32 event_mask)
35257 +{
35258 + unsigned int i = oldbrt;
35259
35260 - while (i > newbrt) {
35261 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35262 - i--;
35263 - }
35264 - while (i < newbrt) {
35265 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35266 - i++;
35267 - }
35268 + while (i > newbrt) {
35269 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35270 + i--;
35271 + }
35272 + while (i < newbrt) {
35273 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35274 + i++;
35275 }
35276 +}
35277
35278 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35279 + struct tp_nvram_state *newn,
35280 + const u32 event_mask)
35281 +{
35282 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35283 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35284 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35285 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35286 oldn->volume_level != newn->volume_level) {
35287 /* recently muted, or repeated mute keypress, or
35288 * multiple presses ending in mute */
35289 - issue_volchange(oldn->volume_level, newn->volume_level);
35290 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35291 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35292 }
35293 } else {
35294 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35295 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35296 }
35297 if (oldn->volume_level != newn->volume_level) {
35298 - issue_volchange(oldn->volume_level, newn->volume_level);
35299 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35300 } else if (oldn->volume_toggle != newn->volume_toggle) {
35301 /* repeated vol up/down keypress at end of scale ? */
35302 if (newn->volume_level == 0)
35303 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35304 /* handle brightness */
35305 if (oldn->brightness_level != newn->brightness_level) {
35306 issue_brightnesschange(oldn->brightness_level,
35307 - newn->brightness_level);
35308 + newn->brightness_level,
35309 + event_mask);
35310 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35311 /* repeated key presses that didn't change state */
35312 if (newn->brightness_level == 0)
35313 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35314 && !tp_features.bright_unkfw)
35315 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35316 }
35317 +}
35318
35319 #undef TPACPI_COMPARE_KEY
35320 #undef TPACPI_MAY_SEND_KEY
35321 -}
35322
35323 /*
35324 * Polling driver
35325 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35326 index b859d16..5cc6b1a 100644
35327 --- a/drivers/pnp/pnpbios/bioscalls.c
35328 +++ b/drivers/pnp/pnpbios/bioscalls.c
35329 @@ -59,7 +59,7 @@ do { \
35330 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35331 } while(0)
35332
35333 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35334 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35335 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35336
35337 /*
35338 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35339
35340 cpu = get_cpu();
35341 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35342 +
35343 + pax_open_kernel();
35344 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35345 + pax_close_kernel();
35346
35347 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35348 spin_lock_irqsave(&pnp_bios_lock, flags);
35349 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35350 :"memory");
35351 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35352
35353 + pax_open_kernel();
35354 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35355 + pax_close_kernel();
35356 +
35357 put_cpu();
35358
35359 /* If we get here and this is set then the PnP BIOS faulted on us. */
35360 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35361 return status;
35362 }
35363
35364 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35365 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35366 {
35367 int i;
35368
35369 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35370 pnp_bios_callpoint.offset = header->fields.pm16offset;
35371 pnp_bios_callpoint.segment = PNP_CS16;
35372
35373 + pax_open_kernel();
35374 +
35375 for_each_possible_cpu(i) {
35376 struct desc_struct *gdt = get_cpu_gdt_table(i);
35377 if (!gdt)
35378 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35379 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35380 (unsigned long)__va(header->fields.pm16dseg));
35381 }
35382 +
35383 + pax_close_kernel();
35384 }
35385 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35386 index b0ecacb..7c9da2e 100644
35387 --- a/drivers/pnp/resource.c
35388 +++ b/drivers/pnp/resource.c
35389 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35390 return 1;
35391
35392 /* check if the resource is valid */
35393 - if (*irq < 0 || *irq > 15)
35394 + if (*irq > 15)
35395 return 0;
35396
35397 /* check if the resource is reserved */
35398 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35399 return 1;
35400
35401 /* check if the resource is valid */
35402 - if (*dma < 0 || *dma == 4 || *dma > 7)
35403 + if (*dma == 4 || *dma > 7)
35404 return 0;
35405
35406 /* check if the resource is reserved */
35407 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35408 index bb16f5b..c751eef 100644
35409 --- a/drivers/power/bq27x00_battery.c
35410 +++ b/drivers/power/bq27x00_battery.c
35411 @@ -67,7 +67,7 @@
35412 struct bq27x00_device_info;
35413 struct bq27x00_access_methods {
35414 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35415 -};
35416 +} __no_const;
35417
35418 enum bq27x00_chip { BQ27000, BQ27500 };
35419
35420 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35421 index 33f5d9a..d957d3f 100644
35422 --- a/drivers/regulator/max8660.c
35423 +++ b/drivers/regulator/max8660.c
35424 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35425 max8660->shadow_regs[MAX8660_OVER1] = 5;
35426 } else {
35427 /* Otherwise devices can be toggled via software */
35428 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35429 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35430 + pax_open_kernel();
35431 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35432 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35433 + pax_close_kernel();
35434 }
35435
35436 /*
35437 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35438 index 023d17d..74ef35b 100644
35439 --- a/drivers/regulator/mc13892-regulator.c
35440 +++ b/drivers/regulator/mc13892-regulator.c
35441 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35442 }
35443 mc13xxx_unlock(mc13892);
35444
35445 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35446 + pax_open_kernel();
35447 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35448 = mc13892_vcam_set_mode;
35449 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35450 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35451 = mc13892_vcam_get_mode;
35452 + pax_close_kernel();
35453 for (i = 0; i < pdata->num_regulators; i++) {
35454 init_data = &pdata->regulators[i];
35455 priv->regulators[i] = regulator_register(
35456 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35457 index cace6d3..f623fda 100644
35458 --- a/drivers/rtc/rtc-dev.c
35459 +++ b/drivers/rtc/rtc-dev.c
35460 @@ -14,6 +14,7 @@
35461 #include <linux/module.h>
35462 #include <linux/rtc.h>
35463 #include <linux/sched.h>
35464 +#include <linux/grsecurity.h>
35465 #include "rtc-core.h"
35466
35467 static dev_t rtc_devt;
35468 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35469 if (copy_from_user(&tm, uarg, sizeof(tm)))
35470 return -EFAULT;
35471
35472 + gr_log_timechange();
35473 +
35474 return rtc_set_time(rtc, &tm);
35475
35476 case RTC_PIE_ON:
35477 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35478 index ffb5878..e6d785c 100644
35479 --- a/drivers/scsi/aacraid/aacraid.h
35480 +++ b/drivers/scsi/aacraid/aacraid.h
35481 @@ -492,7 +492,7 @@ struct adapter_ops
35482 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35483 /* Administrative operations */
35484 int (*adapter_comm)(struct aac_dev * dev, int comm);
35485 -};
35486 +} __no_const;
35487
35488 /*
35489 * Define which interrupt handler needs to be installed
35490 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35491 index 705e13e..91c873c 100644
35492 --- a/drivers/scsi/aacraid/linit.c
35493 +++ b/drivers/scsi/aacraid/linit.c
35494 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35495 #elif defined(__devinitconst)
35496 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35497 #else
35498 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35499 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35500 #endif
35501 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35502 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35503 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35504 index d5ff142..49c0ebb 100644
35505 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35506 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35507 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35508 .lldd_control_phy = asd_control_phy,
35509 };
35510
35511 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35512 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35513 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35514 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35515 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35516 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35517 index a796de9..1ef20e1 100644
35518 --- a/drivers/scsi/bfa/bfa.h
35519 +++ b/drivers/scsi/bfa/bfa.h
35520 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35521 u32 *end);
35522 int cpe_vec_q0;
35523 int rme_vec_q0;
35524 -};
35525 +} __no_const;
35526 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35527
35528 struct bfa_faa_cbfn_s {
35529 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35530 index e07bd47..cd1bbbb 100644
35531 --- a/drivers/scsi/bfa/bfa_fcpim.c
35532 +++ b/drivers/scsi/bfa/bfa_fcpim.c
35533 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
35534
35535 bfa_iotag_attach(fcp);
35536
35537 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
35538 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
35539 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
35540 (fcp->num_itns * sizeof(struct bfa_itn_s));
35541 memset(fcp->itn_arr, 0,
35542 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35543 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35544 {
35545 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35546 - struct bfa_itn_s *itn;
35547 + bfa_itn_s_no_const *itn;
35548
35549 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35550 itn->isr = isr;
35551 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35552 index 1080bcb..a3b39e3 100644
35553 --- a/drivers/scsi/bfa/bfa_fcpim.h
35554 +++ b/drivers/scsi/bfa/bfa_fcpim.h
35555 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
35556 struct bfa_itn_s {
35557 bfa_isr_func_t isr;
35558 };
35559 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35560
35561 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35562 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35563 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
35564 struct list_head iotag_tio_free_q; /* free IO resources */
35565 struct list_head iotag_unused_q; /* unused IO resources*/
35566 struct bfa_iotag_s *iotag_arr;
35567 - struct bfa_itn_s *itn_arr;
35568 + bfa_itn_s_no_const *itn_arr;
35569 int num_ioim_reqs;
35570 int num_fwtio_reqs;
35571 int num_itns;
35572 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35573 index 546d46b..642fa5b 100644
35574 --- a/drivers/scsi/bfa/bfa_ioc.h
35575 +++ b/drivers/scsi/bfa/bfa_ioc.h
35576 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35577 bfa_ioc_disable_cbfn_t disable_cbfn;
35578 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35579 bfa_ioc_reset_cbfn_t reset_cbfn;
35580 -};
35581 +} __no_const;
35582
35583 /*
35584 * IOC event notification mechanism.
35585 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35586 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35587 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35588 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35589 -};
35590 +} __no_const;
35591
35592 /*
35593 * Queue element to wait for room in request queue. FIFO order is
35594 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35595 index 351dc0b..951dc32 100644
35596 --- a/drivers/scsi/hosts.c
35597 +++ b/drivers/scsi/hosts.c
35598 @@ -42,7 +42,7 @@
35599 #include "scsi_logging.h"
35600
35601
35602 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
35603 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35604
35605
35606 static void scsi_host_cls_release(struct device *dev)
35607 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
35608 * subtract one because we increment first then return, but we need to
35609 * know what the next host number was before increment
35610 */
35611 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35612 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35613 shost->dma_channel = 0xff;
35614
35615 /* These three are default values which can be overridden */
35616 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35617 index 865d452..e9b7fa7 100644
35618 --- a/drivers/scsi/hpsa.c
35619 +++ b/drivers/scsi/hpsa.c
35620 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
35621 u32 a;
35622
35623 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35624 - return h->access.command_completed(h);
35625 + return h->access->command_completed(h);
35626
35627 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35628 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35629 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
35630 while (!list_empty(&h->reqQ)) {
35631 c = list_entry(h->reqQ.next, struct CommandList, list);
35632 /* can't do anything if fifo is full */
35633 - if ((h->access.fifo_full(h))) {
35634 + if ((h->access->fifo_full(h))) {
35635 dev_warn(&h->pdev->dev, "fifo full\n");
35636 break;
35637 }
35638 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
35639 h->Qdepth--;
35640
35641 /* Tell the controller execute command */
35642 - h->access.submit_command(h, c);
35643 + h->access->submit_command(h, c);
35644
35645 /* Put job onto the completed Q */
35646 addQ(&h->cmpQ, c);
35647 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
35648
35649 static inline unsigned long get_next_completion(struct ctlr_info *h)
35650 {
35651 - return h->access.command_completed(h);
35652 + return h->access->command_completed(h);
35653 }
35654
35655 static inline bool interrupt_pending(struct ctlr_info *h)
35656 {
35657 - return h->access.intr_pending(h);
35658 + return h->access->intr_pending(h);
35659 }
35660
35661 static inline long interrupt_not_for_us(struct ctlr_info *h)
35662 {
35663 - return (h->access.intr_pending(h) == 0) ||
35664 + return (h->access->intr_pending(h) == 0) ||
35665 (h->interrupts_enabled == 0);
35666 }
35667
35668 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
35669 if (prod_index < 0)
35670 return -ENODEV;
35671 h->product_name = products[prod_index].product_name;
35672 - h->access = *(products[prod_index].access);
35673 + h->access = products[prod_index].access;
35674
35675 if (hpsa_board_disabled(h->pdev)) {
35676 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35677 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
35678
35679 assert_spin_locked(&lockup_detector_lock);
35680 remove_ctlr_from_lockup_detector_list(h);
35681 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35682 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35683 spin_lock_irqsave(&h->lock, flags);
35684 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
35685 spin_unlock_irqrestore(&h->lock, flags);
35686 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
35687 }
35688
35689 /* make sure the board interrupts are off */
35690 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35691 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35692
35693 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35694 goto clean2;
35695 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
35696 * fake ones to scoop up any residual completions.
35697 */
35698 spin_lock_irqsave(&h->lock, flags);
35699 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35700 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35701 spin_unlock_irqrestore(&h->lock, flags);
35702 free_irq(h->intr[h->intr_mode], h);
35703 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
35704 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
35705 dev_info(&h->pdev->dev, "Board READY.\n");
35706 dev_info(&h->pdev->dev,
35707 "Waiting for stale completions to drain.\n");
35708 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35709 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35710 msleep(10000);
35711 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35712 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35713
35714 rc = controller_reset_failed(h->cfgtable);
35715 if (rc)
35716 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
35717 }
35718
35719 /* Turn the interrupts on so we can service requests */
35720 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35721 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35722
35723 hpsa_hba_inquiry(h);
35724 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
35725 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
35726 * To write all data in the battery backed cache to disks
35727 */
35728 hpsa_flush_cache(h);
35729 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35730 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35731 free_irq(h->intr[h->intr_mode], h);
35732 #ifdef CONFIG_PCI_MSI
35733 if (h->msix_vector)
35734 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
35735 return;
35736 }
35737 /* Change the access methods to the performant access methods */
35738 - h->access = SA5_performant_access;
35739 + h->access = &SA5_performant_access;
35740 h->transMethod = CFGTBL_Trans_Performant;
35741 }
35742
35743 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35744 index 91edafb..a9b88ec 100644
35745 --- a/drivers/scsi/hpsa.h
35746 +++ b/drivers/scsi/hpsa.h
35747 @@ -73,7 +73,7 @@ struct ctlr_info {
35748 unsigned int msix_vector;
35749 unsigned int msi_vector;
35750 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35751 - struct access_method access;
35752 + struct access_method *access;
35753
35754 /* queue and queue Info */
35755 struct list_head reqQ;
35756 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35757 index f2df059..a3a9930 100644
35758 --- a/drivers/scsi/ips.h
35759 +++ b/drivers/scsi/ips.h
35760 @@ -1027,7 +1027,7 @@ typedef struct {
35761 int (*intr)(struct ips_ha *);
35762 void (*enableint)(struct ips_ha *);
35763 uint32_t (*statupd)(struct ips_ha *);
35764 -} ips_hw_func_t;
35765 +} __no_const ips_hw_func_t;
35766
35767 typedef struct ips_ha {
35768 uint8_t ha_id[IPS_MAX_CHANNELS+1];
35769 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
35770 index 9de9db2..1e09660 100644
35771 --- a/drivers/scsi/libfc/fc_exch.c
35772 +++ b/drivers/scsi/libfc/fc_exch.c
35773 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
35774 * all together if not used XXX
35775 */
35776 struct {
35777 - atomic_t no_free_exch;
35778 - atomic_t no_free_exch_xid;
35779 - atomic_t xid_not_found;
35780 - atomic_t xid_busy;
35781 - atomic_t seq_not_found;
35782 - atomic_t non_bls_resp;
35783 + atomic_unchecked_t no_free_exch;
35784 + atomic_unchecked_t no_free_exch_xid;
35785 + atomic_unchecked_t xid_not_found;
35786 + atomic_unchecked_t xid_busy;
35787 + atomic_unchecked_t seq_not_found;
35788 + atomic_unchecked_t non_bls_resp;
35789 } stats;
35790 };
35791
35792 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
35793 /* allocate memory for exchange */
35794 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
35795 if (!ep) {
35796 - atomic_inc(&mp->stats.no_free_exch);
35797 + atomic_inc_unchecked(&mp->stats.no_free_exch);
35798 goto out;
35799 }
35800 memset(ep, 0, sizeof(*ep));
35801 @@ -780,7 +780,7 @@ out:
35802 return ep;
35803 err:
35804 spin_unlock_bh(&pool->lock);
35805 - atomic_inc(&mp->stats.no_free_exch_xid);
35806 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
35807 mempool_free(ep, mp->ep_pool);
35808 return NULL;
35809 }
35810 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35811 xid = ntohs(fh->fh_ox_id); /* we originated exch */
35812 ep = fc_exch_find(mp, xid);
35813 if (!ep) {
35814 - atomic_inc(&mp->stats.xid_not_found);
35815 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35816 reject = FC_RJT_OX_ID;
35817 goto out;
35818 }
35819 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35820 ep = fc_exch_find(mp, xid);
35821 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
35822 if (ep) {
35823 - atomic_inc(&mp->stats.xid_busy);
35824 + atomic_inc_unchecked(&mp->stats.xid_busy);
35825 reject = FC_RJT_RX_ID;
35826 goto rel;
35827 }
35828 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35829 }
35830 xid = ep->xid; /* get our XID */
35831 } else if (!ep) {
35832 - atomic_inc(&mp->stats.xid_not_found);
35833 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35834 reject = FC_RJT_RX_ID; /* XID not found */
35835 goto out;
35836 }
35837 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35838 } else {
35839 sp = &ep->seq;
35840 if (sp->id != fh->fh_seq_id) {
35841 - atomic_inc(&mp->stats.seq_not_found);
35842 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35843 if (f_ctl & FC_FC_END_SEQ) {
35844 /*
35845 * Update sequence_id based on incoming last
35846 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35847
35848 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
35849 if (!ep) {
35850 - atomic_inc(&mp->stats.xid_not_found);
35851 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35852 goto out;
35853 }
35854 if (ep->esb_stat & ESB_ST_COMPLETE) {
35855 - atomic_inc(&mp->stats.xid_not_found);
35856 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35857 goto rel;
35858 }
35859 if (ep->rxid == FC_XID_UNKNOWN)
35860 ep->rxid = ntohs(fh->fh_rx_id);
35861 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
35862 - atomic_inc(&mp->stats.xid_not_found);
35863 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35864 goto rel;
35865 }
35866 if (ep->did != ntoh24(fh->fh_s_id) &&
35867 ep->did != FC_FID_FLOGI) {
35868 - atomic_inc(&mp->stats.xid_not_found);
35869 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35870 goto rel;
35871 }
35872 sof = fr_sof(fp);
35873 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35874 sp->ssb_stat |= SSB_ST_RESP;
35875 sp->id = fh->fh_seq_id;
35876 } else if (sp->id != fh->fh_seq_id) {
35877 - atomic_inc(&mp->stats.seq_not_found);
35878 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35879 goto rel;
35880 }
35881
35882 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35883 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
35884
35885 if (!sp)
35886 - atomic_inc(&mp->stats.xid_not_found);
35887 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35888 else
35889 - atomic_inc(&mp->stats.non_bls_resp);
35890 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
35891
35892 fc_frame_free(fp);
35893 }
35894 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
35895 index db9238f..4378ed2 100644
35896 --- a/drivers/scsi/libsas/sas_ata.c
35897 +++ b/drivers/scsi/libsas/sas_ata.c
35898 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
35899 .postreset = ata_std_postreset,
35900 .error_handler = ata_std_error_handler,
35901 .post_internal_cmd = sas_ata_post_internal,
35902 - .qc_defer = ata_std_qc_defer,
35903 + .qc_defer = ata_std_qc_defer,
35904 .qc_prep = ata_noop_qc_prep,
35905 .qc_issue = sas_ata_qc_issue,
35906 .qc_fill_rtf = sas_ata_qc_fill_rtf,
35907 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
35908 index bb4c8e0..f33d849 100644
35909 --- a/drivers/scsi/lpfc/lpfc.h
35910 +++ b/drivers/scsi/lpfc/lpfc.h
35911 @@ -425,7 +425,7 @@ struct lpfc_vport {
35912 struct dentry *debug_nodelist;
35913 struct dentry *vport_debugfs_root;
35914 struct lpfc_debugfs_trc *disc_trc;
35915 - atomic_t disc_trc_cnt;
35916 + atomic_unchecked_t disc_trc_cnt;
35917 #endif
35918 uint8_t stat_data_enabled;
35919 uint8_t stat_data_blocked;
35920 @@ -835,8 +835,8 @@ struct lpfc_hba {
35921 struct timer_list fabric_block_timer;
35922 unsigned long bit_flags;
35923 #define FABRIC_COMANDS_BLOCKED 0
35924 - atomic_t num_rsrc_err;
35925 - atomic_t num_cmd_success;
35926 + atomic_unchecked_t num_rsrc_err;
35927 + atomic_unchecked_t num_cmd_success;
35928 unsigned long last_rsrc_error_time;
35929 unsigned long last_ramp_down_time;
35930 unsigned long last_ramp_up_time;
35931 @@ -866,7 +866,7 @@ struct lpfc_hba {
35932
35933 struct dentry *debug_slow_ring_trc;
35934 struct lpfc_debugfs_trc *slow_ring_trc;
35935 - atomic_t slow_ring_trc_cnt;
35936 + atomic_unchecked_t slow_ring_trc_cnt;
35937 /* iDiag debugfs sub-directory */
35938 struct dentry *idiag_root;
35939 struct dentry *idiag_pci_cfg;
35940 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
35941 index 2838259..a07cfb5 100644
35942 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
35943 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
35944 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
35945
35946 #include <linux/debugfs.h>
35947
35948 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35949 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35950 static unsigned long lpfc_debugfs_start_time = 0L;
35951
35952 /* iDiag */
35953 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
35954 lpfc_debugfs_enable = 0;
35955
35956 len = 0;
35957 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
35958 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
35959 (lpfc_debugfs_max_disc_trc - 1);
35960 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
35961 dtp = vport->disc_trc + i;
35962 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
35963 lpfc_debugfs_enable = 0;
35964
35965 len = 0;
35966 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
35967 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
35968 (lpfc_debugfs_max_slow_ring_trc - 1);
35969 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
35970 dtp = phba->slow_ring_trc + i;
35971 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
35972 !vport || !vport->disc_trc)
35973 return;
35974
35975 - index = atomic_inc_return(&vport->disc_trc_cnt) &
35976 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
35977 (lpfc_debugfs_max_disc_trc - 1);
35978 dtp = vport->disc_trc + index;
35979 dtp->fmt = fmt;
35980 dtp->data1 = data1;
35981 dtp->data2 = data2;
35982 dtp->data3 = data3;
35983 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
35984 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
35985 dtp->jif = jiffies;
35986 #endif
35987 return;
35988 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
35989 !phba || !phba->slow_ring_trc)
35990 return;
35991
35992 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
35993 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
35994 (lpfc_debugfs_max_slow_ring_trc - 1);
35995 dtp = phba->slow_ring_trc + index;
35996 dtp->fmt = fmt;
35997 dtp->data1 = data1;
35998 dtp->data2 = data2;
35999 dtp->data3 = data3;
36000 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36001 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36002 dtp->jif = jiffies;
36003 #endif
36004 return;
36005 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36006 "slow_ring buffer\n");
36007 goto debug_failed;
36008 }
36009 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36010 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36011 memset(phba->slow_ring_trc, 0,
36012 (sizeof(struct lpfc_debugfs_trc) *
36013 lpfc_debugfs_max_slow_ring_trc));
36014 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36015 "buffer\n");
36016 goto debug_failed;
36017 }
36018 - atomic_set(&vport->disc_trc_cnt, 0);
36019 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36020
36021 snprintf(name, sizeof(name), "discovery_trace");
36022 vport->debug_disc_trc =
36023 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36024 index 55bc4fc..a2a109c 100644
36025 --- a/drivers/scsi/lpfc/lpfc_init.c
36026 +++ b/drivers/scsi/lpfc/lpfc_init.c
36027 @@ -10027,8 +10027,10 @@ lpfc_init(void)
36028 printk(LPFC_COPYRIGHT "\n");
36029
36030 if (lpfc_enable_npiv) {
36031 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36032 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36033 + pax_open_kernel();
36034 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36035 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36036 + pax_close_kernel();
36037 }
36038 lpfc_transport_template =
36039 fc_attach_transport(&lpfc_transport_functions);
36040 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36041 index 2e1e54e..1af0a0d 100644
36042 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36043 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36044 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36045 uint32_t evt_posted;
36046
36047 spin_lock_irqsave(&phba->hbalock, flags);
36048 - atomic_inc(&phba->num_rsrc_err);
36049 + atomic_inc_unchecked(&phba->num_rsrc_err);
36050 phba->last_rsrc_error_time = jiffies;
36051
36052 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36053 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36054 unsigned long flags;
36055 struct lpfc_hba *phba = vport->phba;
36056 uint32_t evt_posted;
36057 - atomic_inc(&phba->num_cmd_success);
36058 + atomic_inc_unchecked(&phba->num_cmd_success);
36059
36060 if (vport->cfg_lun_queue_depth <= queue_depth)
36061 return;
36062 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36063 unsigned long num_rsrc_err, num_cmd_success;
36064 int i;
36065
36066 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36067 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36068 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36069 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36070
36071 vports = lpfc_create_vport_work_array(phba);
36072 if (vports != NULL)
36073 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36074 }
36075 }
36076 lpfc_destroy_vport_work_array(phba, vports);
36077 - atomic_set(&phba->num_rsrc_err, 0);
36078 - atomic_set(&phba->num_cmd_success, 0);
36079 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36080 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36081 }
36082
36083 /**
36084 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36085 }
36086 }
36087 lpfc_destroy_vport_work_array(phba, vports);
36088 - atomic_set(&phba->num_rsrc_err, 0);
36089 - atomic_set(&phba->num_cmd_success, 0);
36090 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36091 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36092 }
36093
36094 /**
36095 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36096 index 5163edb..7b142bc 100644
36097 --- a/drivers/scsi/pmcraid.c
36098 +++ b/drivers/scsi/pmcraid.c
36099 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36100 res->scsi_dev = scsi_dev;
36101 scsi_dev->hostdata = res;
36102 res->change_detected = 0;
36103 - atomic_set(&res->read_failures, 0);
36104 - atomic_set(&res->write_failures, 0);
36105 + atomic_set_unchecked(&res->read_failures, 0);
36106 + atomic_set_unchecked(&res->write_failures, 0);
36107 rc = 0;
36108 }
36109 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36110 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36111
36112 /* If this was a SCSI read/write command keep count of errors */
36113 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36114 - atomic_inc(&res->read_failures);
36115 + atomic_inc_unchecked(&res->read_failures);
36116 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36117 - atomic_inc(&res->write_failures);
36118 + atomic_inc_unchecked(&res->write_failures);
36119
36120 if (!RES_IS_GSCSI(res->cfg_entry) &&
36121 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36122 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36123 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36124 * hrrq_id assigned here in queuecommand
36125 */
36126 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36127 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36128 pinstance->num_hrrq;
36129 cmd->cmd_done = pmcraid_io_done;
36130
36131 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36132 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36133 * hrrq_id assigned here in queuecommand
36134 */
36135 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36136 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36137 pinstance->num_hrrq;
36138
36139 if (request_size) {
36140 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36141
36142 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36143 /* add resources only after host is added into system */
36144 - if (!atomic_read(&pinstance->expose_resources))
36145 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36146 return;
36147
36148 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36149 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36150 init_waitqueue_head(&pinstance->reset_wait_q);
36151
36152 atomic_set(&pinstance->outstanding_cmds, 0);
36153 - atomic_set(&pinstance->last_message_id, 0);
36154 - atomic_set(&pinstance->expose_resources, 0);
36155 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36156 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36157
36158 INIT_LIST_HEAD(&pinstance->free_res_q);
36159 INIT_LIST_HEAD(&pinstance->used_res_q);
36160 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36161 /* Schedule worker thread to handle CCN and take care of adding and
36162 * removing devices to OS
36163 */
36164 - atomic_set(&pinstance->expose_resources, 1);
36165 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36166 schedule_work(&pinstance->worker_q);
36167 return rc;
36168
36169 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36170 index ca496c7..9c791d5 100644
36171 --- a/drivers/scsi/pmcraid.h
36172 +++ b/drivers/scsi/pmcraid.h
36173 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36174 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36175
36176 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36177 - atomic_t last_message_id;
36178 + atomic_unchecked_t last_message_id;
36179
36180 /* configuration table */
36181 struct pmcraid_config_table *cfg_table;
36182 @@ -777,7 +777,7 @@ struct pmcraid_instance {
36183 atomic_t outstanding_cmds;
36184
36185 /* should add/delete resources to mid-layer now ?*/
36186 - atomic_t expose_resources;
36187 + atomic_unchecked_t expose_resources;
36188
36189
36190
36191 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36192 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36193 };
36194 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36195 - atomic_t read_failures; /* count of failed READ commands */
36196 - atomic_t write_failures; /* count of failed WRITE commands */
36197 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36198 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36199
36200 /* To indicate add/delete/modify during CCN */
36201 u8 change_detected;
36202 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36203 index fcf052c..a8025a4 100644
36204 --- a/drivers/scsi/qla2xxx/qla_def.h
36205 +++ b/drivers/scsi/qla2xxx/qla_def.h
36206 @@ -2244,7 +2244,7 @@ struct isp_operations {
36207 int (*get_flash_version) (struct scsi_qla_host *, void *);
36208 int (*start_scsi) (srb_t *);
36209 int (*abort_isp) (struct scsi_qla_host *);
36210 -};
36211 +} __no_const;
36212
36213 /* MSI-X Support *************************************************************/
36214
36215 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36216 index fd5edc6..4906148 100644
36217 --- a/drivers/scsi/qla4xxx/ql4_def.h
36218 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36219 @@ -258,7 +258,7 @@ struct ddb_entry {
36220 * (4000 only) */
36221 atomic_t relogin_timer; /* Max Time to wait for
36222 * relogin to complete */
36223 - atomic_t relogin_retry_count; /* Num of times relogin has been
36224 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36225 * retried */
36226 uint32_t default_time2wait; /* Default Min time between
36227 * relogins (+aens) */
36228 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36229 index 4169c8b..a8b896b 100644
36230 --- a/drivers/scsi/qla4xxx/ql4_os.c
36231 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36232 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36233 */
36234 if (!iscsi_is_session_online(cls_sess)) {
36235 /* Reset retry relogin timer */
36236 - atomic_inc(&ddb_entry->relogin_retry_count);
36237 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36238 DEBUG2(ql4_printk(KERN_INFO, ha,
36239 "%s: index[%d] relogin timed out-retrying"
36240 " relogin (%d), retry (%d)\n", __func__,
36241 ddb_entry->fw_ddb_index,
36242 - atomic_read(&ddb_entry->relogin_retry_count),
36243 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36244 ddb_entry->default_time2wait + 4));
36245 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36246 atomic_set(&ddb_entry->retry_relogin_timer,
36247 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36248
36249 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36250 atomic_set(&ddb_entry->relogin_timer, 0);
36251 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36252 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36253
36254 ddb_entry->default_relogin_timeout =
36255 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36256 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36257 index 2aeb2e9..46e3925 100644
36258 --- a/drivers/scsi/scsi.c
36259 +++ b/drivers/scsi/scsi.c
36260 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36261 unsigned long timeout;
36262 int rtn = 0;
36263
36264 - atomic_inc(&cmd->device->iorequest_cnt);
36265 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36266
36267 /* check if the device is still usable */
36268 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36269 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36270 index f85cfa6..a57c9e8 100644
36271 --- a/drivers/scsi/scsi_lib.c
36272 +++ b/drivers/scsi/scsi_lib.c
36273 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36274 shost = sdev->host;
36275 scsi_init_cmd_errh(cmd);
36276 cmd->result = DID_NO_CONNECT << 16;
36277 - atomic_inc(&cmd->device->iorequest_cnt);
36278 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36279
36280 /*
36281 * SCSI request completion path will do scsi_device_unbusy(),
36282 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
36283
36284 INIT_LIST_HEAD(&cmd->eh_entry);
36285
36286 - atomic_inc(&cmd->device->iodone_cnt);
36287 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36288 if (cmd->result)
36289 - atomic_inc(&cmd->device->ioerr_cnt);
36290 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36291
36292 disposition = scsi_decide_disposition(cmd);
36293 if (disposition != SUCCESS &&
36294 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36295 index 04c2a27..9d8bd66 100644
36296 --- a/drivers/scsi/scsi_sysfs.c
36297 +++ b/drivers/scsi/scsi_sysfs.c
36298 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36299 char *buf) \
36300 { \
36301 struct scsi_device *sdev = to_scsi_device(dev); \
36302 - unsigned long long count = atomic_read(&sdev->field); \
36303 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36304 return snprintf(buf, 20, "0x%llx\n", count); \
36305 } \
36306 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36307 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36308 index 84a1fdf..693b0d6 100644
36309 --- a/drivers/scsi/scsi_tgt_lib.c
36310 +++ b/drivers/scsi/scsi_tgt_lib.c
36311 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36312 int err;
36313
36314 dprintk("%lx %u\n", uaddr, len);
36315 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36316 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36317 if (err) {
36318 /*
36319 * TODO: need to fixup sg_tablesize, max_segment_size,
36320 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36321 index 1b21491..1b7f60e 100644
36322 --- a/drivers/scsi/scsi_transport_fc.c
36323 +++ b/drivers/scsi/scsi_transport_fc.c
36324 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36325 * Netlink Infrastructure
36326 */
36327
36328 -static atomic_t fc_event_seq;
36329 +static atomic_unchecked_t fc_event_seq;
36330
36331 /**
36332 * fc_get_event_number - Obtain the next sequential FC event number
36333 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36334 u32
36335 fc_get_event_number(void)
36336 {
36337 - return atomic_add_return(1, &fc_event_seq);
36338 + return atomic_add_return_unchecked(1, &fc_event_seq);
36339 }
36340 EXPORT_SYMBOL(fc_get_event_number);
36341
36342 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36343 {
36344 int error;
36345
36346 - atomic_set(&fc_event_seq, 0);
36347 + atomic_set_unchecked(&fc_event_seq, 0);
36348
36349 error = transport_class_register(&fc_host_class);
36350 if (error)
36351 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36352 char *cp;
36353
36354 *val = simple_strtoul(buf, &cp, 0);
36355 - if ((*cp && (*cp != '\n')) || (*val < 0))
36356 + if (*cp && (*cp != '\n'))
36357 return -EINVAL;
36358 /*
36359 * Check for overflow; dev_loss_tmo is u32
36360 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36361 index 96029e6..4d77fa0 100644
36362 --- a/drivers/scsi/scsi_transport_iscsi.c
36363 +++ b/drivers/scsi/scsi_transport_iscsi.c
36364 @@ -79,7 +79,7 @@ struct iscsi_internal {
36365 struct transport_container session_cont;
36366 };
36367
36368 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36369 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36370 static struct workqueue_struct *iscsi_eh_timer_workq;
36371
36372 static DEFINE_IDA(iscsi_sess_ida);
36373 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36374 int err;
36375
36376 ihost = shost->shost_data;
36377 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36378 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36379
36380 if (target_id == ISCSI_MAX_TARGET) {
36381 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36382 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
36383 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36384 ISCSI_TRANSPORT_VERSION);
36385
36386 - atomic_set(&iscsi_session_nr, 0);
36387 + atomic_set_unchecked(&iscsi_session_nr, 0);
36388
36389 err = class_register(&iscsi_transport_class);
36390 if (err)
36391 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36392 index 21a045e..ec89e03 100644
36393 --- a/drivers/scsi/scsi_transport_srp.c
36394 +++ b/drivers/scsi/scsi_transport_srp.c
36395 @@ -33,7 +33,7 @@
36396 #include "scsi_transport_srp_internal.h"
36397
36398 struct srp_host_attrs {
36399 - atomic_t next_port_id;
36400 + atomic_unchecked_t next_port_id;
36401 };
36402 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36403
36404 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36405 struct Scsi_Host *shost = dev_to_shost(dev);
36406 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36407
36408 - atomic_set(&srp_host->next_port_id, 0);
36409 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36410 return 0;
36411 }
36412
36413 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36414 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36415 rport->roles = ids->roles;
36416
36417 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36418 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36419 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36420
36421 transport_setup_device(&rport->dev);
36422 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36423 index 441a1c5..07cece7 100644
36424 --- a/drivers/scsi/sg.c
36425 +++ b/drivers/scsi/sg.c
36426 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36427 sdp->disk->disk_name,
36428 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36429 NULL,
36430 - (char *)arg);
36431 + (char __user *)arg);
36432 case BLKTRACESTART:
36433 return blk_trace_startstop(sdp->device->request_queue, 1);
36434 case BLKTRACESTOP:
36435 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36436 const struct file_operations * fops;
36437 };
36438
36439 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36440 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36441 {"allow_dio", &adio_fops},
36442 {"debug", &debug_fops},
36443 {"def_reserved_size", &dressz_fops},
36444 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
36445 {
36446 int k, mask;
36447 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36448 - struct sg_proc_leaf * leaf;
36449 + const struct sg_proc_leaf * leaf;
36450
36451 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36452 if (!sg_proc_sgp)
36453 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36454 index f64250e..1ee3049 100644
36455 --- a/drivers/spi/spi-dw-pci.c
36456 +++ b/drivers/spi/spi-dw-pci.c
36457 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
36458 #define spi_resume NULL
36459 #endif
36460
36461 -static const struct pci_device_id pci_ids[] __devinitdata = {
36462 +static const struct pci_device_id pci_ids[] __devinitconst = {
36463 /* Intel MID platform SPI controller 0 */
36464 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36465 {},
36466 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36467 index 77eae99..b7cdcc9 100644
36468 --- a/drivers/spi/spi.c
36469 +++ b/drivers/spi/spi.c
36470 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
36471 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36472
36473 /* portable code must never pass more than 32 bytes */
36474 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36475 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36476
36477 static u8 *buf;
36478
36479 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
36480 index 436fe97..4082570 100644
36481 --- a/drivers/staging/gma500/power.c
36482 +++ b/drivers/staging/gma500/power.c
36483 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
36484 ret = gma_resume_pci(dev->pdev);
36485 if (ret == 0) {
36486 /* FIXME: we want to defer this for Medfield/Oaktrail */
36487 - gma_resume_display(dev);
36488 + gma_resume_display(dev->pdev);
36489 psb_irq_preinstall(dev);
36490 psb_irq_postinstall(dev);
36491 pm_runtime_get(&dev->pdev->dev);
36492 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
36493 index bafccb3..e3ac78d 100644
36494 --- a/drivers/staging/hv/rndis_filter.c
36495 +++ b/drivers/staging/hv/rndis_filter.c
36496 @@ -42,7 +42,7 @@ struct rndis_device {
36497
36498 enum rndis_device_state state;
36499 bool link_state;
36500 - atomic_t new_req_id;
36501 + atomic_unchecked_t new_req_id;
36502
36503 spinlock_t request_lock;
36504 struct list_head req_list;
36505 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36506 * template
36507 */
36508 set = &rndis_msg->msg.set_req;
36509 - set->req_id = atomic_inc_return(&dev->new_req_id);
36510 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36511
36512 /* Add to the request list */
36513 spin_lock_irqsave(&dev->request_lock, flags);
36514 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36515
36516 /* Setup the rndis set */
36517 halt = &request->request_msg.msg.halt_req;
36518 - halt->req_id = atomic_inc_return(&dev->new_req_id);
36519 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36520
36521 /* Ignore return since this msg is optional. */
36522 rndis_filter_send_request(dev, request);
36523 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
36524 index 9e8f010..af9efb56 100644
36525 --- a/drivers/staging/iio/buffer_generic.h
36526 +++ b/drivers/staging/iio/buffer_generic.h
36527 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
36528
36529 int (*is_enabled)(struct iio_buffer *buffer);
36530 int (*enable)(struct iio_buffer *buffer);
36531 -};
36532 +} __no_const;
36533
36534 /**
36535 * struct iio_buffer_setup_ops - buffer setup related callbacks
36536 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36537 index 8b307b4..a97ac91 100644
36538 --- a/drivers/staging/octeon/ethernet-rx.c
36539 +++ b/drivers/staging/octeon/ethernet-rx.c
36540 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36541 /* Increment RX stats for virtual ports */
36542 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36543 #ifdef CONFIG_64BIT
36544 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36545 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36546 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36547 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36548 #else
36549 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36550 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36551 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36552 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36553 #endif
36554 }
36555 netif_receive_skb(skb);
36556 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36557 dev->name);
36558 */
36559 #ifdef CONFIG_64BIT
36560 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36561 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36562 #else
36563 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36564 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36565 #endif
36566 dev_kfree_skb_irq(skb);
36567 }
36568 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36569 index 076f866..2308070 100644
36570 --- a/drivers/staging/octeon/ethernet.c
36571 +++ b/drivers/staging/octeon/ethernet.c
36572 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36573 * since the RX tasklet also increments it.
36574 */
36575 #ifdef CONFIG_64BIT
36576 - atomic64_add(rx_status.dropped_packets,
36577 - (atomic64_t *)&priv->stats.rx_dropped);
36578 + atomic64_add_unchecked(rx_status.dropped_packets,
36579 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36580 #else
36581 - atomic_add(rx_status.dropped_packets,
36582 - (atomic_t *)&priv->stats.rx_dropped);
36583 + atomic_add_unchecked(rx_status.dropped_packets,
36584 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
36585 #endif
36586 }
36587
36588 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
36589 index 7a19555..466456d 100644
36590 --- a/drivers/staging/pohmelfs/inode.c
36591 +++ b/drivers/staging/pohmelfs/inode.c
36592 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
36593 mutex_init(&psb->mcache_lock);
36594 psb->mcache_root = RB_ROOT;
36595 psb->mcache_timeout = msecs_to_jiffies(5000);
36596 - atomic_long_set(&psb->mcache_gen, 0);
36597 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
36598
36599 psb->trans_max_pages = 100;
36600
36601 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
36602 INIT_LIST_HEAD(&psb->crypto_ready_list);
36603 INIT_LIST_HEAD(&psb->crypto_active_list);
36604
36605 - atomic_set(&psb->trans_gen, 1);
36606 + atomic_set_unchecked(&psb->trans_gen, 1);
36607 atomic_long_set(&psb->total_inodes, 0);
36608
36609 mutex_init(&psb->state_lock);
36610 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
36611 index e22665c..a2a9390 100644
36612 --- a/drivers/staging/pohmelfs/mcache.c
36613 +++ b/drivers/staging/pohmelfs/mcache.c
36614 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
36615 m->data = data;
36616 m->start = start;
36617 m->size = size;
36618 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
36619 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
36620
36621 mutex_lock(&psb->mcache_lock);
36622 err = pohmelfs_mcache_insert(psb, m);
36623 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
36624 index 985b6b7..7699e05 100644
36625 --- a/drivers/staging/pohmelfs/netfs.h
36626 +++ b/drivers/staging/pohmelfs/netfs.h
36627 @@ -571,14 +571,14 @@ struct pohmelfs_config;
36628 struct pohmelfs_sb {
36629 struct rb_root mcache_root;
36630 struct mutex mcache_lock;
36631 - atomic_long_t mcache_gen;
36632 + atomic_long_unchecked_t mcache_gen;
36633 unsigned long mcache_timeout;
36634
36635 unsigned int idx;
36636
36637 unsigned int trans_retries;
36638
36639 - atomic_t trans_gen;
36640 + atomic_unchecked_t trans_gen;
36641
36642 unsigned int crypto_attached_size;
36643 unsigned int crypto_align_size;
36644 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
36645 index 06c1a74..866eebc 100644
36646 --- a/drivers/staging/pohmelfs/trans.c
36647 +++ b/drivers/staging/pohmelfs/trans.c
36648 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
36649 int err;
36650 struct netfs_cmd *cmd = t->iovec.iov_base;
36651
36652 - t->gen = atomic_inc_return(&psb->trans_gen);
36653 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
36654
36655 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
36656 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
36657 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
36658 index 86308a0..feaa925 100644
36659 --- a/drivers/staging/rtl8712/rtl871x_io.h
36660 +++ b/drivers/staging/rtl8712/rtl871x_io.h
36661 @@ -108,7 +108,7 @@ struct _io_ops {
36662 u8 *pmem);
36663 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
36664 u8 *pmem);
36665 -};
36666 +} __no_const;
36667
36668 struct io_req {
36669 struct list_head list;
36670 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
36671 index c7b5e8b..783d6cb 100644
36672 --- a/drivers/staging/sbe-2t3e3/netdev.c
36673 +++ b/drivers/staging/sbe-2t3e3/netdev.c
36674 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36675 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
36676
36677 if (rlen)
36678 - if (copy_to_user(data, &resp, rlen))
36679 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
36680 return -EFAULT;
36681
36682 return 0;
36683 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
36684 index be21617..0954e45 100644
36685 --- a/drivers/staging/usbip/usbip_common.h
36686 +++ b/drivers/staging/usbip/usbip_common.h
36687 @@ -289,7 +289,7 @@ struct usbip_device {
36688 void (*shutdown)(struct usbip_device *);
36689 void (*reset)(struct usbip_device *);
36690 void (*unusable)(struct usbip_device *);
36691 - } eh_ops;
36692 + } __no_const eh_ops;
36693 };
36694
36695 #if 0
36696 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
36697 index 88b3298..3783eee 100644
36698 --- a/drivers/staging/usbip/vhci.h
36699 +++ b/drivers/staging/usbip/vhci.h
36700 @@ -88,7 +88,7 @@ struct vhci_hcd {
36701 unsigned resuming:1;
36702 unsigned long re_timeout;
36703
36704 - atomic_t seqnum;
36705 + atomic_unchecked_t seqnum;
36706
36707 /*
36708 * NOTE:
36709 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
36710 index 2ee97e2..0420b86 100644
36711 --- a/drivers/staging/usbip/vhci_hcd.c
36712 +++ b/drivers/staging/usbip/vhci_hcd.c
36713 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
36714 return;
36715 }
36716
36717 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
36718 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36719 if (priv->seqnum == 0xffff)
36720 dev_info(&urb->dev->dev, "seqnum max\n");
36721
36722 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
36723 return -ENOMEM;
36724 }
36725
36726 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
36727 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36728 if (unlink->seqnum == 0xffff)
36729 pr_info("seqnum max\n");
36730
36731 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
36732 vdev->rhport = rhport;
36733 }
36734
36735 - atomic_set(&vhci->seqnum, 0);
36736 + atomic_set_unchecked(&vhci->seqnum, 0);
36737 spin_lock_init(&vhci->lock);
36738
36739 hcd->power_budget = 0; /* no limit */
36740 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
36741 index 3872b8c..fe6d2f4 100644
36742 --- a/drivers/staging/usbip/vhci_rx.c
36743 +++ b/drivers/staging/usbip/vhci_rx.c
36744 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
36745 if (!urb) {
36746 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
36747 pr_info("max seqnum %d\n",
36748 - atomic_read(&the_controller->seqnum));
36749 + atomic_read_unchecked(&the_controller->seqnum));
36750 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
36751 return;
36752 }
36753 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
36754 index 7735027..30eed13 100644
36755 --- a/drivers/staging/vt6655/hostap.c
36756 +++ b/drivers/staging/vt6655/hostap.c
36757 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
36758 *
36759 */
36760
36761 +static net_device_ops_no_const apdev_netdev_ops;
36762 +
36763 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36764 {
36765 PSDevice apdev_priv;
36766 struct net_device *dev = pDevice->dev;
36767 int ret;
36768 - const struct net_device_ops apdev_netdev_ops = {
36769 - .ndo_start_xmit = pDevice->tx_80211,
36770 - };
36771
36772 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36773
36774 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36775 *apdev_priv = *pDevice;
36776 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36777
36778 + /* only half broken now */
36779 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36780 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36781
36782 pDevice->apdev->type = ARPHRD_IEEE80211;
36783 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
36784 index 51b5adf..098e320 100644
36785 --- a/drivers/staging/vt6656/hostap.c
36786 +++ b/drivers/staging/vt6656/hostap.c
36787 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
36788 *
36789 */
36790
36791 +static net_device_ops_no_const apdev_netdev_ops;
36792 +
36793 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36794 {
36795 PSDevice apdev_priv;
36796 struct net_device *dev = pDevice->dev;
36797 int ret;
36798 - const struct net_device_ops apdev_netdev_ops = {
36799 - .ndo_start_xmit = pDevice->tx_80211,
36800 - };
36801
36802 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36803
36804 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36805 *apdev_priv = *pDevice;
36806 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36807
36808 + /* only half broken now */
36809 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36810 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36811
36812 pDevice->apdev->type = ARPHRD_IEEE80211;
36813 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
36814 index 7843dfd..3db105f 100644
36815 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
36816 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
36817 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
36818
36819 struct usbctlx_completor {
36820 int (*complete) (struct usbctlx_completor *);
36821 -};
36822 +} __no_const;
36823
36824 static int
36825 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
36826 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
36827 index 1ca66ea..76f1343 100644
36828 --- a/drivers/staging/zcache/tmem.c
36829 +++ b/drivers/staging/zcache/tmem.c
36830 @@ -39,7 +39,7 @@
36831 * A tmem host implementation must use this function to register callbacks
36832 * for memory allocation.
36833 */
36834 -static struct tmem_hostops tmem_hostops;
36835 +static tmem_hostops_no_const tmem_hostops;
36836
36837 static void tmem_objnode_tree_init(void);
36838
36839 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
36840 * A tmem host implementation must use this function to register
36841 * callbacks for a page-accessible memory (PAM) implementation
36842 */
36843 -static struct tmem_pamops tmem_pamops;
36844 +static tmem_pamops_no_const tmem_pamops;
36845
36846 void tmem_register_pamops(struct tmem_pamops *m)
36847 {
36848 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
36849 index ed147c4..94fc3c6 100644
36850 --- a/drivers/staging/zcache/tmem.h
36851 +++ b/drivers/staging/zcache/tmem.h
36852 @@ -180,6 +180,7 @@ struct tmem_pamops {
36853 void (*new_obj)(struct tmem_obj *);
36854 int (*replace_in_obj)(void *, struct tmem_obj *);
36855 };
36856 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
36857 extern void tmem_register_pamops(struct tmem_pamops *m);
36858
36859 /* memory allocation methods provided by the host implementation */
36860 @@ -189,6 +190,7 @@ struct tmem_hostops {
36861 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
36862 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
36863 };
36864 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
36865 extern void tmem_register_hostops(struct tmem_hostops *m);
36866
36867 /* core tmem accessor functions */
36868 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
36869 index 0c1d5c73..88e90a8 100644
36870 --- a/drivers/target/iscsi/iscsi_target.c
36871 +++ b/drivers/target/iscsi/iscsi_target.c
36872 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
36873 * outstanding_r2ts reaches zero, go ahead and send the delayed
36874 * TASK_ABORTED status.
36875 */
36876 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
36877 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
36878 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
36879 if (--cmd->outstanding_r2ts < 1) {
36880 iscsit_stop_dataout_timer(cmd);
36881 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
36882 index 6845228..df77141 100644
36883 --- a/drivers/target/target_core_tmr.c
36884 +++ b/drivers/target/target_core_tmr.c
36885 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
36886 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
36887 cmd->t_task_list_num,
36888 atomic_read(&cmd->t_task_cdbs_left),
36889 - atomic_read(&cmd->t_task_cdbs_sent),
36890 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36891 atomic_read(&cmd->t_transport_active),
36892 atomic_read(&cmd->t_transport_stop),
36893 atomic_read(&cmd->t_transport_sent));
36894 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
36895 pr_debug("LUN_RESET: got t_transport_active = 1 for"
36896 " task: %p, t_fe_count: %d dev: %p\n", task,
36897 fe_count, dev);
36898 - atomic_set(&cmd->t_transport_aborted, 1);
36899 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36900 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36901
36902 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36903 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
36904 }
36905 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
36906 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
36907 - atomic_set(&cmd->t_transport_aborted, 1);
36908 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36909 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36910
36911 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36912 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
36913 index e4ddb93..2fc6e0f 100644
36914 --- a/drivers/target/target_core_transport.c
36915 +++ b/drivers/target/target_core_transport.c
36916 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
36917
36918 dev->queue_depth = dev_limits->queue_depth;
36919 atomic_set(&dev->depth_left, dev->queue_depth);
36920 - atomic_set(&dev->dev_ordered_id, 0);
36921 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
36922
36923 se_dev_set_default_attribs(dev, dev_limits);
36924
36925 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
36926 * Used to determine when ORDERED commands should go from
36927 * Dormant to Active status.
36928 */
36929 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
36930 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
36931 smp_mb__after_atomic_inc();
36932 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
36933 cmd->se_ordered_id, cmd->sam_task_attr,
36934 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
36935 " t_transport_active: %d t_transport_stop: %d"
36936 " t_transport_sent: %d\n", cmd->t_task_list_num,
36937 atomic_read(&cmd->t_task_cdbs_left),
36938 - atomic_read(&cmd->t_task_cdbs_sent),
36939 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36940 atomic_read(&cmd->t_task_cdbs_ex_left),
36941 atomic_read(&cmd->t_transport_active),
36942 atomic_read(&cmd->t_transport_stop),
36943 @@ -2089,9 +2089,9 @@ check_depth:
36944
36945 spin_lock_irqsave(&cmd->t_state_lock, flags);
36946 task->task_flags |= (TF_ACTIVE | TF_SENT);
36947 - atomic_inc(&cmd->t_task_cdbs_sent);
36948 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
36949
36950 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
36951 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
36952 cmd->t_task_list_num)
36953 atomic_set(&cmd->t_transport_sent, 1);
36954
36955 @@ -4296,7 +4296,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
36956 atomic_set(&cmd->transport_lun_stop, 0);
36957 }
36958 if (!atomic_read(&cmd->t_transport_active) ||
36959 - atomic_read(&cmd->t_transport_aborted)) {
36960 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
36961 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36962 return false;
36963 }
36964 @@ -4545,7 +4545,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
36965 {
36966 int ret = 0;
36967
36968 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
36969 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
36970 if (!send_status ||
36971 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
36972 return 1;
36973 @@ -4582,7 +4582,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
36974 */
36975 if (cmd->data_direction == DMA_TO_DEVICE) {
36976 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
36977 - atomic_inc(&cmd->t_transport_aborted);
36978 + atomic_inc_unchecked(&cmd->t_transport_aborted);
36979 smp_mb__after_atomic_inc();
36980 }
36981 }
36982 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
36983 index b9040be..e3f5aab 100644
36984 --- a/drivers/tty/hvc/hvcs.c
36985 +++ b/drivers/tty/hvc/hvcs.c
36986 @@ -83,6 +83,7 @@
36987 #include <asm/hvcserver.h>
36988 #include <asm/uaccess.h>
36989 #include <asm/vio.h>
36990 +#include <asm/local.h>
36991
36992 /*
36993 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
36994 @@ -270,7 +271,7 @@ struct hvcs_struct {
36995 unsigned int index;
36996
36997 struct tty_struct *tty;
36998 - int open_count;
36999 + local_t open_count;
37000
37001 /*
37002 * Used to tell the driver kernel_thread what operations need to take
37003 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37004
37005 spin_lock_irqsave(&hvcsd->lock, flags);
37006
37007 - if (hvcsd->open_count > 0) {
37008 + if (local_read(&hvcsd->open_count) > 0) {
37009 spin_unlock_irqrestore(&hvcsd->lock, flags);
37010 printk(KERN_INFO "HVCS: vterm state unchanged. "
37011 "The hvcs device node is still in use.\n");
37012 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37013 if ((retval = hvcs_partner_connect(hvcsd)))
37014 goto error_release;
37015
37016 - hvcsd->open_count = 1;
37017 + local_set(&hvcsd->open_count, 1);
37018 hvcsd->tty = tty;
37019 tty->driver_data = hvcsd;
37020
37021 @@ -1179,7 +1180,7 @@ fast_open:
37022
37023 spin_lock_irqsave(&hvcsd->lock, flags);
37024 kref_get(&hvcsd->kref);
37025 - hvcsd->open_count++;
37026 + local_inc(&hvcsd->open_count);
37027 hvcsd->todo_mask |= HVCS_SCHED_READ;
37028 spin_unlock_irqrestore(&hvcsd->lock, flags);
37029
37030 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37031 hvcsd = tty->driver_data;
37032
37033 spin_lock_irqsave(&hvcsd->lock, flags);
37034 - if (--hvcsd->open_count == 0) {
37035 + if (local_dec_and_test(&hvcsd->open_count)) {
37036
37037 vio_disable_interrupts(hvcsd->vdev);
37038
37039 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37040 free_irq(irq, hvcsd);
37041 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37042 return;
37043 - } else if (hvcsd->open_count < 0) {
37044 + } else if (local_read(&hvcsd->open_count) < 0) {
37045 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37046 " is missmanaged.\n",
37047 - hvcsd->vdev->unit_address, hvcsd->open_count);
37048 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37049 }
37050
37051 spin_unlock_irqrestore(&hvcsd->lock, flags);
37052 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37053
37054 spin_lock_irqsave(&hvcsd->lock, flags);
37055 /* Preserve this so that we know how many kref refs to put */
37056 - temp_open_count = hvcsd->open_count;
37057 + temp_open_count = local_read(&hvcsd->open_count);
37058
37059 /*
37060 * Don't kref put inside the spinlock because the destruction
37061 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37062 hvcsd->tty->driver_data = NULL;
37063 hvcsd->tty = NULL;
37064
37065 - hvcsd->open_count = 0;
37066 + local_set(&hvcsd->open_count, 0);
37067
37068 /* This will drop any buffered data on the floor which is OK in a hangup
37069 * scenario. */
37070 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37071 * the middle of a write operation? This is a crummy place to do this
37072 * but we want to keep it all in the spinlock.
37073 */
37074 - if (hvcsd->open_count <= 0) {
37075 + if (local_read(&hvcsd->open_count) <= 0) {
37076 spin_unlock_irqrestore(&hvcsd->lock, flags);
37077 return -ENODEV;
37078 }
37079 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37080 {
37081 struct hvcs_struct *hvcsd = tty->driver_data;
37082
37083 - if (!hvcsd || hvcsd->open_count <= 0)
37084 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37085 return 0;
37086
37087 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37088 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37089 index ef92869..f4ebd88 100644
37090 --- a/drivers/tty/ipwireless/tty.c
37091 +++ b/drivers/tty/ipwireless/tty.c
37092 @@ -29,6 +29,7 @@
37093 #include <linux/tty_driver.h>
37094 #include <linux/tty_flip.h>
37095 #include <linux/uaccess.h>
37096 +#include <asm/local.h>
37097
37098 #include "tty.h"
37099 #include "network.h"
37100 @@ -51,7 +52,7 @@ struct ipw_tty {
37101 int tty_type;
37102 struct ipw_network *network;
37103 struct tty_struct *linux_tty;
37104 - int open_count;
37105 + local_t open_count;
37106 unsigned int control_lines;
37107 struct mutex ipw_tty_mutex;
37108 int tx_bytes_queued;
37109 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37110 mutex_unlock(&tty->ipw_tty_mutex);
37111 return -ENODEV;
37112 }
37113 - if (tty->open_count == 0)
37114 + if (local_read(&tty->open_count) == 0)
37115 tty->tx_bytes_queued = 0;
37116
37117 - tty->open_count++;
37118 + local_inc(&tty->open_count);
37119
37120 tty->linux_tty = linux_tty;
37121 linux_tty->driver_data = tty;
37122 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37123
37124 static void do_ipw_close(struct ipw_tty *tty)
37125 {
37126 - tty->open_count--;
37127 -
37128 - if (tty->open_count == 0) {
37129 + if (local_dec_return(&tty->open_count) == 0) {
37130 struct tty_struct *linux_tty = tty->linux_tty;
37131
37132 if (linux_tty != NULL) {
37133 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37134 return;
37135
37136 mutex_lock(&tty->ipw_tty_mutex);
37137 - if (tty->open_count == 0) {
37138 + if (local_read(&tty->open_count) == 0) {
37139 mutex_unlock(&tty->ipw_tty_mutex);
37140 return;
37141 }
37142 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37143 return;
37144 }
37145
37146 - if (!tty->open_count) {
37147 + if (!local_read(&tty->open_count)) {
37148 mutex_unlock(&tty->ipw_tty_mutex);
37149 return;
37150 }
37151 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37152 return -ENODEV;
37153
37154 mutex_lock(&tty->ipw_tty_mutex);
37155 - if (!tty->open_count) {
37156 + if (!local_read(&tty->open_count)) {
37157 mutex_unlock(&tty->ipw_tty_mutex);
37158 return -EINVAL;
37159 }
37160 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37161 if (!tty)
37162 return -ENODEV;
37163
37164 - if (!tty->open_count)
37165 + if (!local_read(&tty->open_count))
37166 return -EINVAL;
37167
37168 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37169 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37170 if (!tty)
37171 return 0;
37172
37173 - if (!tty->open_count)
37174 + if (!local_read(&tty->open_count))
37175 return 0;
37176
37177 return tty->tx_bytes_queued;
37178 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37179 if (!tty)
37180 return -ENODEV;
37181
37182 - if (!tty->open_count)
37183 + if (!local_read(&tty->open_count))
37184 return -EINVAL;
37185
37186 return get_control_lines(tty);
37187 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37188 if (!tty)
37189 return -ENODEV;
37190
37191 - if (!tty->open_count)
37192 + if (!local_read(&tty->open_count))
37193 return -EINVAL;
37194
37195 return set_control_lines(tty, set, clear);
37196 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37197 if (!tty)
37198 return -ENODEV;
37199
37200 - if (!tty->open_count)
37201 + if (!local_read(&tty->open_count))
37202 return -EINVAL;
37203
37204 /* FIXME: Exactly how is the tty object locked here .. */
37205 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37206 against a parallel ioctl etc */
37207 mutex_lock(&ttyj->ipw_tty_mutex);
37208 }
37209 - while (ttyj->open_count)
37210 + while (local_read(&ttyj->open_count))
37211 do_ipw_close(ttyj);
37212 ipwireless_disassociate_network_ttys(network,
37213 ttyj->channel_idx);
37214 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37215 index fc7bbba..9527e93 100644
37216 --- a/drivers/tty/n_gsm.c
37217 +++ b/drivers/tty/n_gsm.c
37218 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37219 kref_init(&dlci->ref);
37220 mutex_init(&dlci->mutex);
37221 dlci->fifo = &dlci->_fifo;
37222 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37223 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37224 kfree(dlci);
37225 return NULL;
37226 }
37227 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37228 index 39d6ab6..eb97f41 100644
37229 --- a/drivers/tty/n_tty.c
37230 +++ b/drivers/tty/n_tty.c
37231 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37232 {
37233 *ops = tty_ldisc_N_TTY;
37234 ops->owner = NULL;
37235 - ops->refcount = ops->flags = 0;
37236 + atomic_set(&ops->refcount, 0);
37237 + ops->flags = 0;
37238 }
37239 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37240 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37241 index e18604b..a7d5a11 100644
37242 --- a/drivers/tty/pty.c
37243 +++ b/drivers/tty/pty.c
37244 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
37245 register_sysctl_table(pty_root_table);
37246
37247 /* Now create the /dev/ptmx special device */
37248 + pax_open_kernel();
37249 tty_default_fops(&ptmx_fops);
37250 - ptmx_fops.open = ptmx_open;
37251 + *(void **)&ptmx_fops.open = ptmx_open;
37252 + pax_close_kernel();
37253
37254 cdev_init(&ptmx_cdev, &ptmx_fops);
37255 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37256 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37257 index 2b42a01..32a2ed3 100644
37258 --- a/drivers/tty/serial/kgdboc.c
37259 +++ b/drivers/tty/serial/kgdboc.c
37260 @@ -24,8 +24,9 @@
37261 #define MAX_CONFIG_LEN 40
37262
37263 static struct kgdb_io kgdboc_io_ops;
37264 +static struct kgdb_io kgdboc_io_ops_console;
37265
37266 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37267 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37268 static int configured = -1;
37269
37270 static char config[MAX_CONFIG_LEN];
37271 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37272 kgdboc_unregister_kbd();
37273 if (configured == 1)
37274 kgdb_unregister_io_module(&kgdboc_io_ops);
37275 + else if (configured == 2)
37276 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37277 }
37278
37279 static int configure_kgdboc(void)
37280 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37281 int err;
37282 char *cptr = config;
37283 struct console *cons;
37284 + int is_console = 0;
37285
37286 err = kgdboc_option_setup(config);
37287 if (err || !strlen(config) || isspace(config[0]))
37288 goto noconfig;
37289
37290 err = -ENODEV;
37291 - kgdboc_io_ops.is_console = 0;
37292 kgdb_tty_driver = NULL;
37293
37294 kgdboc_use_kms = 0;
37295 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37296 int idx;
37297 if (cons->device && cons->device(cons, &idx) == p &&
37298 idx == tty_line) {
37299 - kgdboc_io_ops.is_console = 1;
37300 + is_console = 1;
37301 break;
37302 }
37303 cons = cons->next;
37304 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37305 kgdb_tty_line = tty_line;
37306
37307 do_register:
37308 - err = kgdb_register_io_module(&kgdboc_io_ops);
37309 + if (is_console) {
37310 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37311 + configured = 2;
37312 + } else {
37313 + err = kgdb_register_io_module(&kgdboc_io_ops);
37314 + configured = 1;
37315 + }
37316 if (err)
37317 goto noconfig;
37318
37319 - configured = 1;
37320 -
37321 return 0;
37322
37323 noconfig:
37324 @@ -213,7 +220,7 @@ noconfig:
37325 static int __init init_kgdboc(void)
37326 {
37327 /* Already configured? */
37328 - if (configured == 1)
37329 + if (configured >= 1)
37330 return 0;
37331
37332 return configure_kgdboc();
37333 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37334 if (config[len - 1] == '\n')
37335 config[len - 1] = '\0';
37336
37337 - if (configured == 1)
37338 + if (configured >= 1)
37339 cleanup_kgdboc();
37340
37341 /* Go and configure with the new params. */
37342 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37343 .post_exception = kgdboc_post_exp_handler,
37344 };
37345
37346 +static struct kgdb_io kgdboc_io_ops_console = {
37347 + .name = "kgdboc",
37348 + .read_char = kgdboc_get_char,
37349 + .write_char = kgdboc_put_char,
37350 + .pre_exception = kgdboc_pre_exp_handler,
37351 + .post_exception = kgdboc_post_exp_handler,
37352 + .is_console = 1
37353 +};
37354 +
37355 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37356 /* This is only available if kgdboc is a built in for early debugging */
37357 static int __init kgdboc_early_init(char *opt)
37358 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37359 index 05085be..67eadb0 100644
37360 --- a/drivers/tty/tty_io.c
37361 +++ b/drivers/tty/tty_io.c
37362 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37363
37364 void tty_default_fops(struct file_operations *fops)
37365 {
37366 - *fops = tty_fops;
37367 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37368 }
37369
37370 /*
37371 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37372 index 8e0924f..4204eb4 100644
37373 --- a/drivers/tty/tty_ldisc.c
37374 +++ b/drivers/tty/tty_ldisc.c
37375 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37376 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37377 struct tty_ldisc_ops *ldo = ld->ops;
37378
37379 - ldo->refcount--;
37380 + atomic_dec(&ldo->refcount);
37381 module_put(ldo->owner);
37382 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37383
37384 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37385 spin_lock_irqsave(&tty_ldisc_lock, flags);
37386 tty_ldiscs[disc] = new_ldisc;
37387 new_ldisc->num = disc;
37388 - new_ldisc->refcount = 0;
37389 + atomic_set(&new_ldisc->refcount, 0);
37390 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37391
37392 return ret;
37393 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
37394 return -EINVAL;
37395
37396 spin_lock_irqsave(&tty_ldisc_lock, flags);
37397 - if (tty_ldiscs[disc]->refcount)
37398 + if (atomic_read(&tty_ldiscs[disc]->refcount))
37399 ret = -EBUSY;
37400 else
37401 tty_ldiscs[disc] = NULL;
37402 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37403 if (ldops) {
37404 ret = ERR_PTR(-EAGAIN);
37405 if (try_module_get(ldops->owner)) {
37406 - ldops->refcount++;
37407 + atomic_inc(&ldops->refcount);
37408 ret = ldops;
37409 }
37410 }
37411 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37412 unsigned long flags;
37413
37414 spin_lock_irqsave(&tty_ldisc_lock, flags);
37415 - ldops->refcount--;
37416 + atomic_dec(&ldops->refcount);
37417 module_put(ldops->owner);
37418 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37419 }
37420 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37421 index a605549..6bd3c96 100644
37422 --- a/drivers/tty/vt/keyboard.c
37423 +++ b/drivers/tty/vt/keyboard.c
37424 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37425 kbd->kbdmode == VC_OFF) &&
37426 value != KVAL(K_SAK))
37427 return; /* SAK is allowed even in raw mode */
37428 +
37429 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37430 + {
37431 + void *func = fn_handler[value];
37432 + if (func == fn_show_state || func == fn_show_ptregs ||
37433 + func == fn_show_mem)
37434 + return;
37435 + }
37436 +#endif
37437 +
37438 fn_handler[value](vc);
37439 }
37440
37441 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
37442 index 65447c5..0526f0a 100644
37443 --- a/drivers/tty/vt/vt_ioctl.c
37444 +++ b/drivers/tty/vt/vt_ioctl.c
37445 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37446 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37447 return -EFAULT;
37448
37449 - if (!capable(CAP_SYS_TTY_CONFIG))
37450 - perm = 0;
37451 -
37452 switch (cmd) {
37453 case KDGKBENT:
37454 key_map = key_maps[s];
37455 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37456 val = (i ? K_HOLE : K_NOSUCHMAP);
37457 return put_user(val, &user_kbe->kb_value);
37458 case KDSKBENT:
37459 + if (!capable(CAP_SYS_TTY_CONFIG))
37460 + perm = 0;
37461 +
37462 if (!perm)
37463 return -EPERM;
37464 if (!i && v == K_NOSUCHMAP) {
37465 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37466 int i, j, k;
37467 int ret;
37468
37469 - if (!capable(CAP_SYS_TTY_CONFIG))
37470 - perm = 0;
37471 -
37472 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37473 if (!kbs) {
37474 ret = -ENOMEM;
37475 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37476 kfree(kbs);
37477 return ((p && *p) ? -EOVERFLOW : 0);
37478 case KDSKBSENT:
37479 + if (!capable(CAP_SYS_TTY_CONFIG))
37480 + perm = 0;
37481 +
37482 if (!perm) {
37483 ret = -EPERM;
37484 goto reterr;
37485 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37486 index a783d53..cb30d94 100644
37487 --- a/drivers/uio/uio.c
37488 +++ b/drivers/uio/uio.c
37489 @@ -25,6 +25,7 @@
37490 #include <linux/kobject.h>
37491 #include <linux/cdev.h>
37492 #include <linux/uio_driver.h>
37493 +#include <asm/local.h>
37494
37495 #define UIO_MAX_DEVICES (1U << MINORBITS)
37496
37497 @@ -32,10 +33,10 @@ struct uio_device {
37498 struct module *owner;
37499 struct device *dev;
37500 int minor;
37501 - atomic_t event;
37502 + atomic_unchecked_t event;
37503 struct fasync_struct *async_queue;
37504 wait_queue_head_t wait;
37505 - int vma_count;
37506 + local_t vma_count;
37507 struct uio_info *info;
37508 struct kobject *map_dir;
37509 struct kobject *portio_dir;
37510 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37511 struct device_attribute *attr, char *buf)
37512 {
37513 struct uio_device *idev = dev_get_drvdata(dev);
37514 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37515 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37516 }
37517
37518 static struct device_attribute uio_class_attributes[] = {
37519 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37520 {
37521 struct uio_device *idev = info->uio_dev;
37522
37523 - atomic_inc(&idev->event);
37524 + atomic_inc_unchecked(&idev->event);
37525 wake_up_interruptible(&idev->wait);
37526 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37527 }
37528 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37529 }
37530
37531 listener->dev = idev;
37532 - listener->event_count = atomic_read(&idev->event);
37533 + listener->event_count = atomic_read_unchecked(&idev->event);
37534 filep->private_data = listener;
37535
37536 if (idev->info->open) {
37537 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37538 return -EIO;
37539
37540 poll_wait(filep, &idev->wait, wait);
37541 - if (listener->event_count != atomic_read(&idev->event))
37542 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37543 return POLLIN | POLLRDNORM;
37544 return 0;
37545 }
37546 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37547 do {
37548 set_current_state(TASK_INTERRUPTIBLE);
37549
37550 - event_count = atomic_read(&idev->event);
37551 + event_count = atomic_read_unchecked(&idev->event);
37552 if (event_count != listener->event_count) {
37553 if (copy_to_user(buf, &event_count, count))
37554 retval = -EFAULT;
37555 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37556 static void uio_vma_open(struct vm_area_struct *vma)
37557 {
37558 struct uio_device *idev = vma->vm_private_data;
37559 - idev->vma_count++;
37560 + local_inc(&idev->vma_count);
37561 }
37562
37563 static void uio_vma_close(struct vm_area_struct *vma)
37564 {
37565 struct uio_device *idev = vma->vm_private_data;
37566 - idev->vma_count--;
37567 + local_dec(&idev->vma_count);
37568 }
37569
37570 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37571 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37572 idev->owner = owner;
37573 idev->info = info;
37574 init_waitqueue_head(&idev->wait);
37575 - atomic_set(&idev->event, 0);
37576 + atomic_set_unchecked(&idev->event, 0);
37577
37578 ret = uio_get_minor(idev);
37579 if (ret)
37580 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37581 index a845f8b..4f54072 100644
37582 --- a/drivers/usb/atm/cxacru.c
37583 +++ b/drivers/usb/atm/cxacru.c
37584 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37585 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37586 if (ret < 2)
37587 return -EINVAL;
37588 - if (index < 0 || index > 0x7f)
37589 + if (index > 0x7f)
37590 return -EINVAL;
37591 pos += tmp;
37592
37593 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37594 index d3448ca..d2864ca 100644
37595 --- a/drivers/usb/atm/usbatm.c
37596 +++ b/drivers/usb/atm/usbatm.c
37597 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37598 if (printk_ratelimit())
37599 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37600 __func__, vpi, vci);
37601 - atomic_inc(&vcc->stats->rx_err);
37602 + atomic_inc_unchecked(&vcc->stats->rx_err);
37603 return;
37604 }
37605
37606 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37607 if (length > ATM_MAX_AAL5_PDU) {
37608 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37609 __func__, length, vcc);
37610 - atomic_inc(&vcc->stats->rx_err);
37611 + atomic_inc_unchecked(&vcc->stats->rx_err);
37612 goto out;
37613 }
37614
37615 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37616 if (sarb->len < pdu_length) {
37617 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37618 __func__, pdu_length, sarb->len, vcc);
37619 - atomic_inc(&vcc->stats->rx_err);
37620 + atomic_inc_unchecked(&vcc->stats->rx_err);
37621 goto out;
37622 }
37623
37624 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37625 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37626 __func__, vcc);
37627 - atomic_inc(&vcc->stats->rx_err);
37628 + atomic_inc_unchecked(&vcc->stats->rx_err);
37629 goto out;
37630 }
37631
37632 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37633 if (printk_ratelimit())
37634 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37635 __func__, length);
37636 - atomic_inc(&vcc->stats->rx_drop);
37637 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37638 goto out;
37639 }
37640
37641 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37642
37643 vcc->push(vcc, skb);
37644
37645 - atomic_inc(&vcc->stats->rx);
37646 + atomic_inc_unchecked(&vcc->stats->rx);
37647 out:
37648 skb_trim(sarb, 0);
37649 }
37650 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
37651 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37652
37653 usbatm_pop(vcc, skb);
37654 - atomic_inc(&vcc->stats->tx);
37655 + atomic_inc_unchecked(&vcc->stats->tx);
37656
37657 skb = skb_dequeue(&instance->sndqueue);
37658 }
37659 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
37660 if (!left--)
37661 return sprintf(page,
37662 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37663 - atomic_read(&atm_dev->stats.aal5.tx),
37664 - atomic_read(&atm_dev->stats.aal5.tx_err),
37665 - atomic_read(&atm_dev->stats.aal5.rx),
37666 - atomic_read(&atm_dev->stats.aal5.rx_err),
37667 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37668 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37669 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37670 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37671 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37672 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37673
37674 if (!left--) {
37675 if (instance->disconnected)
37676 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
37677 index d956965..4179a77 100644
37678 --- a/drivers/usb/core/devices.c
37679 +++ b/drivers/usb/core/devices.c
37680 @@ -126,7 +126,7 @@ static const char format_endpt[] =
37681 * time it gets called.
37682 */
37683 static struct device_connect_event {
37684 - atomic_t count;
37685 + atomic_unchecked_t count;
37686 wait_queue_head_t wait;
37687 } device_event = {
37688 .count = ATOMIC_INIT(1),
37689 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
37690
37691 void usbfs_conn_disc_event(void)
37692 {
37693 - atomic_add(2, &device_event.count);
37694 + atomic_add_unchecked(2, &device_event.count);
37695 wake_up(&device_event.wait);
37696 }
37697
37698 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
37699
37700 poll_wait(file, &device_event.wait, wait);
37701
37702 - event_count = atomic_read(&device_event.count);
37703 + event_count = atomic_read_unchecked(&device_event.count);
37704 if (file->f_version != event_count) {
37705 file->f_version = event_count;
37706 return POLLIN | POLLRDNORM;
37707 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
37708 index b3bdfed..a9460e0 100644
37709 --- a/drivers/usb/core/message.c
37710 +++ b/drivers/usb/core/message.c
37711 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
37712 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37713 if (buf) {
37714 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37715 - if (len > 0) {
37716 - smallbuf = kmalloc(++len, GFP_NOIO);
37717 + if (len++ > 0) {
37718 + smallbuf = kmalloc(len, GFP_NOIO);
37719 if (!smallbuf)
37720 return buf;
37721 memcpy(smallbuf, buf, len);
37722 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
37723 index 1fc8f12..20647c1 100644
37724 --- a/drivers/usb/early/ehci-dbgp.c
37725 +++ b/drivers/usb/early/ehci-dbgp.c
37726 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
37727
37728 #ifdef CONFIG_KGDB
37729 static struct kgdb_io kgdbdbgp_io_ops;
37730 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
37731 +static struct kgdb_io kgdbdbgp_io_ops_console;
37732 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
37733 #else
37734 #define dbgp_kgdb_mode (0)
37735 #endif
37736 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
37737 .write_char = kgdbdbgp_write_char,
37738 };
37739
37740 +static struct kgdb_io kgdbdbgp_io_ops_console = {
37741 + .name = "kgdbdbgp",
37742 + .read_char = kgdbdbgp_read_char,
37743 + .write_char = kgdbdbgp_write_char,
37744 + .is_console = 1
37745 +};
37746 +
37747 static int kgdbdbgp_wait_time;
37748
37749 static int __init kgdbdbgp_parse_config(char *str)
37750 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
37751 ptr++;
37752 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
37753 }
37754 - kgdb_register_io_module(&kgdbdbgp_io_ops);
37755 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
37756 + if (early_dbgp_console.index != -1)
37757 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
37758 + else
37759 + kgdb_register_io_module(&kgdbdbgp_io_ops);
37760
37761 return 0;
37762 }
37763 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
37764 index d6bea3e..60b250e 100644
37765 --- a/drivers/usb/wusbcore/wa-hc.h
37766 +++ b/drivers/usb/wusbcore/wa-hc.h
37767 @@ -192,7 +192,7 @@ struct wahc {
37768 struct list_head xfer_delayed_list;
37769 spinlock_t xfer_list_lock;
37770 struct work_struct xfer_work;
37771 - atomic_t xfer_id_count;
37772 + atomic_unchecked_t xfer_id_count;
37773 };
37774
37775
37776 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
37777 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37778 spin_lock_init(&wa->xfer_list_lock);
37779 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37780 - atomic_set(&wa->xfer_id_count, 1);
37781 + atomic_set_unchecked(&wa->xfer_id_count, 1);
37782 }
37783
37784 /**
37785 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
37786 index 57c01ab..8a05959 100644
37787 --- a/drivers/usb/wusbcore/wa-xfer.c
37788 +++ b/drivers/usb/wusbcore/wa-xfer.c
37789 @@ -296,7 +296,7 @@ out:
37790 */
37791 static void wa_xfer_id_init(struct wa_xfer *xfer)
37792 {
37793 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37794 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37795 }
37796
37797 /*
37798 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
37799 index c14c42b..f955cc2 100644
37800 --- a/drivers/vhost/vhost.c
37801 +++ b/drivers/vhost/vhost.c
37802 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
37803 return 0;
37804 }
37805
37806 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
37807 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
37808 {
37809 struct file *eventfp, *filep = NULL,
37810 *pollstart = NULL, *pollstop = NULL;
37811 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
37812 index b0b2ac3..89a4399 100644
37813 --- a/drivers/video/aty/aty128fb.c
37814 +++ b/drivers/video/aty/aty128fb.c
37815 @@ -148,7 +148,7 @@ enum {
37816 };
37817
37818 /* Must match above enum */
37819 -static const char *r128_family[] __devinitdata = {
37820 +static const char *r128_family[] __devinitconst = {
37821 "AGP",
37822 "PCI",
37823 "PRO AGP",
37824 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
37825 index 5c3960d..15cf8fc 100644
37826 --- a/drivers/video/fbcmap.c
37827 +++ b/drivers/video/fbcmap.c
37828 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
37829 rc = -ENODEV;
37830 goto out;
37831 }
37832 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
37833 - !info->fbops->fb_setcmap)) {
37834 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
37835 rc = -EINVAL;
37836 goto out1;
37837 }
37838 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
37839 index ad93629..e020fc3 100644
37840 --- a/drivers/video/fbmem.c
37841 +++ b/drivers/video/fbmem.c
37842 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37843 image->dx += image->width + 8;
37844 }
37845 } else if (rotate == FB_ROTATE_UD) {
37846 - for (x = 0; x < num && image->dx >= 0; x++) {
37847 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
37848 info->fbops->fb_imageblit(info, image);
37849 image->dx -= image->width + 8;
37850 }
37851 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37852 image->dy += image->height + 8;
37853 }
37854 } else if (rotate == FB_ROTATE_CCW) {
37855 - for (x = 0; x < num && image->dy >= 0; x++) {
37856 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
37857 info->fbops->fb_imageblit(info, image);
37858 image->dy -= image->height + 8;
37859 }
37860 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
37861 return -EFAULT;
37862 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
37863 return -EINVAL;
37864 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
37865 + if (con2fb.framebuffer >= FB_MAX)
37866 return -EINVAL;
37867 if (!registered_fb[con2fb.framebuffer])
37868 request_module("fb%d", con2fb.framebuffer);
37869 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
37870 index 5a5d092..265c5ed 100644
37871 --- a/drivers/video/geode/gx1fb_core.c
37872 +++ b/drivers/video/geode/gx1fb_core.c
37873 @@ -29,7 +29,7 @@ static int crt_option = 1;
37874 static char panel_option[32] = "";
37875
37876 /* Modes relevant to the GX1 (taken from modedb.c) */
37877 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
37878 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
37879 /* 640x480-60 VESA */
37880 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
37881 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
37882 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
37883 index 0fad23f..0e9afa4 100644
37884 --- a/drivers/video/gxt4500.c
37885 +++ b/drivers/video/gxt4500.c
37886 @@ -156,7 +156,7 @@ struct gxt4500_par {
37887 static char *mode_option;
37888
37889 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
37890 -static const struct fb_videomode defaultmode __devinitdata = {
37891 +static const struct fb_videomode defaultmode __devinitconst = {
37892 .refresh = 60,
37893 .xres = 1280,
37894 .yres = 1024,
37895 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
37896 return 0;
37897 }
37898
37899 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
37900 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
37901 .id = "IBM GXT4500P",
37902 .type = FB_TYPE_PACKED_PIXELS,
37903 .visual = FB_VISUAL_PSEUDOCOLOR,
37904 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
37905 index 7672d2e..b56437f 100644
37906 --- a/drivers/video/i810/i810_accel.c
37907 +++ b/drivers/video/i810/i810_accel.c
37908 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
37909 }
37910 }
37911 printk("ringbuffer lockup!!!\n");
37912 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
37913 i810_report_error(mmio);
37914 par->dev_flags |= LOCKUP;
37915 info->pixmap.scan_align = 1;
37916 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
37917 index 318f6fb..9a389c1 100644
37918 --- a/drivers/video/i810/i810_main.c
37919 +++ b/drivers/video/i810/i810_main.c
37920 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
37921 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
37922
37923 /* PCI */
37924 -static const char *i810_pci_list[] __devinitdata = {
37925 +static const char *i810_pci_list[] __devinitconst = {
37926 "Intel(R) 810 Framebuffer Device" ,
37927 "Intel(R) 810-DC100 Framebuffer Device" ,
37928 "Intel(R) 810E Framebuffer Device" ,
37929 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
37930 index de36693..3c63fc2 100644
37931 --- a/drivers/video/jz4740_fb.c
37932 +++ b/drivers/video/jz4740_fb.c
37933 @@ -136,7 +136,7 @@ struct jzfb {
37934 uint32_t pseudo_palette[16];
37935 };
37936
37937 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
37938 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
37939 .id = "JZ4740 FB",
37940 .type = FB_TYPE_PACKED_PIXELS,
37941 .visual = FB_VISUAL_TRUECOLOR,
37942 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
37943 index 3c14e43..eafa544 100644
37944 --- a/drivers/video/logo/logo_linux_clut224.ppm
37945 +++ b/drivers/video/logo/logo_linux_clut224.ppm
37946 @@ -1,1604 +1,1123 @@
37947 P3
37948 -# Standard 224-color Linux logo
37949 80 80
37950 255
37951 - 0 0 0 0 0 0 0 0 0 0 0 0
37952 - 0 0 0 0 0 0 0 0 0 0 0 0
37953 - 0 0 0 0 0 0 0 0 0 0 0 0
37954 - 0 0 0 0 0 0 0 0 0 0 0 0
37955 - 0 0 0 0 0 0 0 0 0 0 0 0
37956 - 0 0 0 0 0 0 0 0 0 0 0 0
37957 - 0 0 0 0 0 0 0 0 0 0 0 0
37958 - 0 0 0 0 0 0 0 0 0 0 0 0
37959 - 0 0 0 0 0 0 0 0 0 0 0 0
37960 - 6 6 6 6 6 6 10 10 10 10 10 10
37961 - 10 10 10 6 6 6 6 6 6 6 6 6
37962 - 0 0 0 0 0 0 0 0 0 0 0 0
37963 - 0 0 0 0 0 0 0 0 0 0 0 0
37964 - 0 0 0 0 0 0 0 0 0 0 0 0
37965 - 0 0 0 0 0 0 0 0 0 0 0 0
37966 - 0 0 0 0 0 0 0 0 0 0 0 0
37967 - 0 0 0 0 0 0 0 0 0 0 0 0
37968 - 0 0 0 0 0 0 0 0 0 0 0 0
37969 - 0 0 0 0 0 0 0 0 0 0 0 0
37970 - 0 0 0 0 0 0 0 0 0 0 0 0
37971 - 0 0 0 0 0 0 0 0 0 0 0 0
37972 - 0 0 0 0 0 0 0 0 0 0 0 0
37973 - 0 0 0 0 0 0 0 0 0 0 0 0
37974 - 0 0 0 0 0 0 0 0 0 0 0 0
37975 - 0 0 0 0 0 0 0 0 0 0 0 0
37976 - 0 0 0 0 0 0 0 0 0 0 0 0
37977 - 0 0 0 0 0 0 0 0 0 0 0 0
37978 - 0 0 0 0 0 0 0 0 0 0 0 0
37979 - 0 0 0 6 6 6 10 10 10 14 14 14
37980 - 22 22 22 26 26 26 30 30 30 34 34 34
37981 - 30 30 30 30 30 30 26 26 26 18 18 18
37982 - 14 14 14 10 10 10 6 6 6 0 0 0
37983 - 0 0 0 0 0 0 0 0 0 0 0 0
37984 - 0 0 0 0 0 0 0 0 0 0 0 0
37985 - 0 0 0 0 0 0 0 0 0 0 0 0
37986 - 0 0 0 0 0 0 0 0 0 0 0 0
37987 - 0 0 0 0 0 0 0 0 0 0 0 0
37988 - 0 0 0 0 0 0 0 0 0 0 0 0
37989 - 0 0 0 0 0 0 0 0 0 0 0 0
37990 - 0 0 0 0 0 0 0 0 0 0 0 0
37991 - 0 0 0 0 0 0 0 0 0 0 0 0
37992 - 0 0 0 0 0 1 0 0 1 0 0 0
37993 - 0 0 0 0 0 0 0 0 0 0 0 0
37994 - 0 0 0 0 0 0 0 0 0 0 0 0
37995 - 0 0 0 0 0 0 0 0 0 0 0 0
37996 - 0 0 0 0 0 0 0 0 0 0 0 0
37997 - 0 0 0 0 0 0 0 0 0 0 0 0
37998 - 0 0 0 0 0 0 0 0 0 0 0 0
37999 - 6 6 6 14 14 14 26 26 26 42 42 42
38000 - 54 54 54 66 66 66 78 78 78 78 78 78
38001 - 78 78 78 74 74 74 66 66 66 54 54 54
38002 - 42 42 42 26 26 26 18 18 18 10 10 10
38003 - 6 6 6 0 0 0 0 0 0 0 0 0
38004 - 0 0 0 0 0 0 0 0 0 0 0 0
38005 - 0 0 0 0 0 0 0 0 0 0 0 0
38006 - 0 0 0 0 0 0 0 0 0 0 0 0
38007 - 0 0 0 0 0 0 0 0 0 0 0 0
38008 - 0 0 0 0 0 0 0 0 0 0 0 0
38009 - 0 0 0 0 0 0 0 0 0 0 0 0
38010 - 0 0 0 0 0 0 0 0 0 0 0 0
38011 - 0 0 0 0 0 0 0 0 0 0 0 0
38012 - 0 0 1 0 0 0 0 0 0 0 0 0
38013 - 0 0 0 0 0 0 0 0 0 0 0 0
38014 - 0 0 0 0 0 0 0 0 0 0 0 0
38015 - 0 0 0 0 0 0 0 0 0 0 0 0
38016 - 0 0 0 0 0 0 0 0 0 0 0 0
38017 - 0 0 0 0 0 0 0 0 0 0 0 0
38018 - 0 0 0 0 0 0 0 0 0 10 10 10
38019 - 22 22 22 42 42 42 66 66 66 86 86 86
38020 - 66 66 66 38 38 38 38 38 38 22 22 22
38021 - 26 26 26 34 34 34 54 54 54 66 66 66
38022 - 86 86 86 70 70 70 46 46 46 26 26 26
38023 - 14 14 14 6 6 6 0 0 0 0 0 0
38024 - 0 0 0 0 0 0 0 0 0 0 0 0
38025 - 0 0 0 0 0 0 0 0 0 0 0 0
38026 - 0 0 0 0 0 0 0 0 0 0 0 0
38027 - 0 0 0 0 0 0 0 0 0 0 0 0
38028 - 0 0 0 0 0 0 0 0 0 0 0 0
38029 - 0 0 0 0 0 0 0 0 0 0 0 0
38030 - 0 0 0 0 0 0 0 0 0 0 0 0
38031 - 0 0 0 0 0 0 0 0 0 0 0 0
38032 - 0 0 1 0 0 1 0 0 1 0 0 0
38033 - 0 0 0 0 0 0 0 0 0 0 0 0
38034 - 0 0 0 0 0 0 0 0 0 0 0 0
38035 - 0 0 0 0 0 0 0 0 0 0 0 0
38036 - 0 0 0 0 0 0 0 0 0 0 0 0
38037 - 0 0 0 0 0 0 0 0 0 0 0 0
38038 - 0 0 0 0 0 0 10 10 10 26 26 26
38039 - 50 50 50 82 82 82 58 58 58 6 6 6
38040 - 2 2 6 2 2 6 2 2 6 2 2 6
38041 - 2 2 6 2 2 6 2 2 6 2 2 6
38042 - 6 6 6 54 54 54 86 86 86 66 66 66
38043 - 38 38 38 18 18 18 6 6 6 0 0 0
38044 - 0 0 0 0 0 0 0 0 0 0 0 0
38045 - 0 0 0 0 0 0 0 0 0 0 0 0
38046 - 0 0 0 0 0 0 0 0 0 0 0 0
38047 - 0 0 0 0 0 0 0 0 0 0 0 0
38048 - 0 0 0 0 0 0 0 0 0 0 0 0
38049 - 0 0 0 0 0 0 0 0 0 0 0 0
38050 - 0 0 0 0 0 0 0 0 0 0 0 0
38051 - 0 0 0 0 0 0 0 0 0 0 0 0
38052 - 0 0 0 0 0 0 0 0 0 0 0 0
38053 - 0 0 0 0 0 0 0 0 0 0 0 0
38054 - 0 0 0 0 0 0 0 0 0 0 0 0
38055 - 0 0 0 0 0 0 0 0 0 0 0 0
38056 - 0 0 0 0 0 0 0 0 0 0 0 0
38057 - 0 0 0 0 0 0 0 0 0 0 0 0
38058 - 0 0 0 6 6 6 22 22 22 50 50 50
38059 - 78 78 78 34 34 34 2 2 6 2 2 6
38060 - 2 2 6 2 2 6 2 2 6 2 2 6
38061 - 2 2 6 2 2 6 2 2 6 2 2 6
38062 - 2 2 6 2 2 6 6 6 6 70 70 70
38063 - 78 78 78 46 46 46 22 22 22 6 6 6
38064 - 0 0 0 0 0 0 0 0 0 0 0 0
38065 - 0 0 0 0 0 0 0 0 0 0 0 0
38066 - 0 0 0 0 0 0 0 0 0 0 0 0
38067 - 0 0 0 0 0 0 0 0 0 0 0 0
38068 - 0 0 0 0 0 0 0 0 0 0 0 0
38069 - 0 0 0 0 0 0 0 0 0 0 0 0
38070 - 0 0 0 0 0 0 0 0 0 0 0 0
38071 - 0 0 0 0 0 0 0 0 0 0 0 0
38072 - 0 0 1 0 0 1 0 0 1 0 0 0
38073 - 0 0 0 0 0 0 0 0 0 0 0 0
38074 - 0 0 0 0 0 0 0 0 0 0 0 0
38075 - 0 0 0 0 0 0 0 0 0 0 0 0
38076 - 0 0 0 0 0 0 0 0 0 0 0 0
38077 - 0 0 0 0 0 0 0 0 0 0 0 0
38078 - 6 6 6 18 18 18 42 42 42 82 82 82
38079 - 26 26 26 2 2 6 2 2 6 2 2 6
38080 - 2 2 6 2 2 6 2 2 6 2 2 6
38081 - 2 2 6 2 2 6 2 2 6 14 14 14
38082 - 46 46 46 34 34 34 6 6 6 2 2 6
38083 - 42 42 42 78 78 78 42 42 42 18 18 18
38084 - 6 6 6 0 0 0 0 0 0 0 0 0
38085 - 0 0 0 0 0 0 0 0 0 0 0 0
38086 - 0 0 0 0 0 0 0 0 0 0 0 0
38087 - 0 0 0 0 0 0 0 0 0 0 0 0
38088 - 0 0 0 0 0 0 0 0 0 0 0 0
38089 - 0 0 0 0 0 0 0 0 0 0 0 0
38090 - 0 0 0 0 0 0 0 0 0 0 0 0
38091 - 0 0 0 0 0 0 0 0 0 0 0 0
38092 - 0 0 1 0 0 0 0 0 1 0 0 0
38093 - 0 0 0 0 0 0 0 0 0 0 0 0
38094 - 0 0 0 0 0 0 0 0 0 0 0 0
38095 - 0 0 0 0 0 0 0 0 0 0 0 0
38096 - 0 0 0 0 0 0 0 0 0 0 0 0
38097 - 0 0 0 0 0 0 0 0 0 0 0 0
38098 - 10 10 10 30 30 30 66 66 66 58 58 58
38099 - 2 2 6 2 2 6 2 2 6 2 2 6
38100 - 2 2 6 2 2 6 2 2 6 2 2 6
38101 - 2 2 6 2 2 6 2 2 6 26 26 26
38102 - 86 86 86 101 101 101 46 46 46 10 10 10
38103 - 2 2 6 58 58 58 70 70 70 34 34 34
38104 - 10 10 10 0 0 0 0 0 0 0 0 0
38105 - 0 0 0 0 0 0 0 0 0 0 0 0
38106 - 0 0 0 0 0 0 0 0 0 0 0 0
38107 - 0 0 0 0 0 0 0 0 0 0 0 0
38108 - 0 0 0 0 0 0 0 0 0 0 0 0
38109 - 0 0 0 0 0 0 0 0 0 0 0 0
38110 - 0 0 0 0 0 0 0 0 0 0 0 0
38111 - 0 0 0 0 0 0 0 0 0 0 0 0
38112 - 0 0 1 0 0 1 0 0 1 0 0 0
38113 - 0 0 0 0 0 0 0 0 0 0 0 0
38114 - 0 0 0 0 0 0 0 0 0 0 0 0
38115 - 0 0 0 0 0 0 0 0 0 0 0 0
38116 - 0 0 0 0 0 0 0 0 0 0 0 0
38117 - 0 0 0 0 0 0 0 0 0 0 0 0
38118 - 14 14 14 42 42 42 86 86 86 10 10 10
38119 - 2 2 6 2 2 6 2 2 6 2 2 6
38120 - 2 2 6 2 2 6 2 2 6 2 2 6
38121 - 2 2 6 2 2 6 2 2 6 30 30 30
38122 - 94 94 94 94 94 94 58 58 58 26 26 26
38123 - 2 2 6 6 6 6 78 78 78 54 54 54
38124 - 22 22 22 6 6 6 0 0 0 0 0 0
38125 - 0 0 0 0 0 0 0 0 0 0 0 0
38126 - 0 0 0 0 0 0 0 0 0 0 0 0
38127 - 0 0 0 0 0 0 0 0 0 0 0 0
38128 - 0 0 0 0 0 0 0 0 0 0 0 0
38129 - 0 0 0 0 0 0 0 0 0 0 0 0
38130 - 0 0 0 0 0 0 0 0 0 0 0 0
38131 - 0 0 0 0 0 0 0 0 0 0 0 0
38132 - 0 0 0 0 0 0 0 0 0 0 0 0
38133 - 0 0 0 0 0 0 0 0 0 0 0 0
38134 - 0 0 0 0 0 0 0 0 0 0 0 0
38135 - 0 0 0 0 0 0 0 0 0 0 0 0
38136 - 0 0 0 0 0 0 0 0 0 0 0 0
38137 - 0 0 0 0 0 0 0 0 0 6 6 6
38138 - 22 22 22 62 62 62 62 62 62 2 2 6
38139 - 2 2 6 2 2 6 2 2 6 2 2 6
38140 - 2 2 6 2 2 6 2 2 6 2 2 6
38141 - 2 2 6 2 2 6 2 2 6 26 26 26
38142 - 54 54 54 38 38 38 18 18 18 10 10 10
38143 - 2 2 6 2 2 6 34 34 34 82 82 82
38144 - 38 38 38 14 14 14 0 0 0 0 0 0
38145 - 0 0 0 0 0 0 0 0 0 0 0 0
38146 - 0 0 0 0 0 0 0 0 0 0 0 0
38147 - 0 0 0 0 0 0 0 0 0 0 0 0
38148 - 0 0 0 0 0 0 0 0 0 0 0 0
38149 - 0 0 0 0 0 0 0 0 0 0 0 0
38150 - 0 0 0 0 0 0 0 0 0 0 0 0
38151 - 0 0 0 0 0 0 0 0 0 0 0 0
38152 - 0 0 0 0 0 1 0 0 1 0 0 0
38153 - 0 0 0 0 0 0 0 0 0 0 0 0
38154 - 0 0 0 0 0 0 0 0 0 0 0 0
38155 - 0 0 0 0 0 0 0 0 0 0 0 0
38156 - 0 0 0 0 0 0 0 0 0 0 0 0
38157 - 0 0 0 0 0 0 0 0 0 6 6 6
38158 - 30 30 30 78 78 78 30 30 30 2 2 6
38159 - 2 2 6 2 2 6 2 2 6 2 2 6
38160 - 2 2 6 2 2 6 2 2 6 2 2 6
38161 - 2 2 6 2 2 6 2 2 6 10 10 10
38162 - 10 10 10 2 2 6 2 2 6 2 2 6
38163 - 2 2 6 2 2 6 2 2 6 78 78 78
38164 - 50 50 50 18 18 18 6 6 6 0 0 0
38165 - 0 0 0 0 0 0 0 0 0 0 0 0
38166 - 0 0 0 0 0 0 0 0 0 0 0 0
38167 - 0 0 0 0 0 0 0 0 0 0 0 0
38168 - 0 0 0 0 0 0 0 0 0 0 0 0
38169 - 0 0 0 0 0 0 0 0 0 0 0 0
38170 - 0 0 0 0 0 0 0 0 0 0 0 0
38171 - 0 0 0 0 0 0 0 0 0 0 0 0
38172 - 0 0 1 0 0 0 0 0 0 0 0 0
38173 - 0 0 0 0 0 0 0 0 0 0 0 0
38174 - 0 0 0 0 0 0 0 0 0 0 0 0
38175 - 0 0 0 0 0 0 0 0 0 0 0 0
38176 - 0 0 0 0 0 0 0 0 0 0 0 0
38177 - 0 0 0 0 0 0 0 0 0 10 10 10
38178 - 38 38 38 86 86 86 14 14 14 2 2 6
38179 - 2 2 6 2 2 6 2 2 6 2 2 6
38180 - 2 2 6 2 2 6 2 2 6 2 2 6
38181 - 2 2 6 2 2 6 2 2 6 2 2 6
38182 - 2 2 6 2 2 6 2 2 6 2 2 6
38183 - 2 2 6 2 2 6 2 2 6 54 54 54
38184 - 66 66 66 26 26 26 6 6 6 0 0 0
38185 - 0 0 0 0 0 0 0 0 0 0 0 0
38186 - 0 0 0 0 0 0 0 0 0 0 0 0
38187 - 0 0 0 0 0 0 0 0 0 0 0 0
38188 - 0 0 0 0 0 0 0 0 0 0 0 0
38189 - 0 0 0 0 0 0 0 0 0 0 0 0
38190 - 0 0 0 0 0 0 0 0 0 0 0 0
38191 - 0 0 0 0 0 0 0 0 0 0 0 0
38192 - 0 0 0 0 0 1 0 0 1 0 0 0
38193 - 0 0 0 0 0 0 0 0 0 0 0 0
38194 - 0 0 0 0 0 0 0 0 0 0 0 0
38195 - 0 0 0 0 0 0 0 0 0 0 0 0
38196 - 0 0 0 0 0 0 0 0 0 0 0 0
38197 - 0 0 0 0 0 0 0 0 0 14 14 14
38198 - 42 42 42 82 82 82 2 2 6 2 2 6
38199 - 2 2 6 6 6 6 10 10 10 2 2 6
38200 - 2 2 6 2 2 6 2 2 6 2 2 6
38201 - 2 2 6 2 2 6 2 2 6 6 6 6
38202 - 14 14 14 10 10 10 2 2 6 2 2 6
38203 - 2 2 6 2 2 6 2 2 6 18 18 18
38204 - 82 82 82 34 34 34 10 10 10 0 0 0
38205 - 0 0 0 0 0 0 0 0 0 0 0 0
38206 - 0 0 0 0 0 0 0 0 0 0 0 0
38207 - 0 0 0 0 0 0 0 0 0 0 0 0
38208 - 0 0 0 0 0 0 0 0 0 0 0 0
38209 - 0 0 0 0 0 0 0 0 0 0 0 0
38210 - 0 0 0 0 0 0 0 0 0 0 0 0
38211 - 0 0 0 0 0 0 0 0 0 0 0 0
38212 - 0 0 1 0 0 0 0 0 0 0 0 0
38213 - 0 0 0 0 0 0 0 0 0 0 0 0
38214 - 0 0 0 0 0 0 0 0 0 0 0 0
38215 - 0 0 0 0 0 0 0 0 0 0 0 0
38216 - 0 0 0 0 0 0 0 0 0 0 0 0
38217 - 0 0 0 0 0 0 0 0 0 14 14 14
38218 - 46 46 46 86 86 86 2 2 6 2 2 6
38219 - 6 6 6 6 6 6 22 22 22 34 34 34
38220 - 6 6 6 2 2 6 2 2 6 2 2 6
38221 - 2 2 6 2 2 6 18 18 18 34 34 34
38222 - 10 10 10 50 50 50 22 22 22 2 2 6
38223 - 2 2 6 2 2 6 2 2 6 10 10 10
38224 - 86 86 86 42 42 42 14 14 14 0 0 0
38225 - 0 0 0 0 0 0 0 0 0 0 0 0
38226 - 0 0 0 0 0 0 0 0 0 0 0 0
38227 - 0 0 0 0 0 0 0 0 0 0 0 0
38228 - 0 0 0 0 0 0 0 0 0 0 0 0
38229 - 0 0 0 0 0 0 0 0 0 0 0 0
38230 - 0 0 0 0 0 0 0 0 0 0 0 0
38231 - 0 0 0 0 0 0 0 0 0 0 0 0
38232 - 0 0 1 0 0 1 0 0 1 0 0 0
38233 - 0 0 0 0 0 0 0 0 0 0 0 0
38234 - 0 0 0 0 0 0 0 0 0 0 0 0
38235 - 0 0 0 0 0 0 0 0 0 0 0 0
38236 - 0 0 0 0 0 0 0 0 0 0 0 0
38237 - 0 0 0 0 0 0 0 0 0 14 14 14
38238 - 46 46 46 86 86 86 2 2 6 2 2 6
38239 - 38 38 38 116 116 116 94 94 94 22 22 22
38240 - 22 22 22 2 2 6 2 2 6 2 2 6
38241 - 14 14 14 86 86 86 138 138 138 162 162 162
38242 -154 154 154 38 38 38 26 26 26 6 6 6
38243 - 2 2 6 2 2 6 2 2 6 2 2 6
38244 - 86 86 86 46 46 46 14 14 14 0 0 0
38245 - 0 0 0 0 0 0 0 0 0 0 0 0
38246 - 0 0 0 0 0 0 0 0 0 0 0 0
38247 - 0 0 0 0 0 0 0 0 0 0 0 0
38248 - 0 0 0 0 0 0 0 0 0 0 0 0
38249 - 0 0 0 0 0 0 0 0 0 0 0 0
38250 - 0 0 0 0 0 0 0 0 0 0 0 0
38251 - 0 0 0 0 0 0 0 0 0 0 0 0
38252 - 0 0 0 0 0 0 0 0 0 0 0 0
38253 - 0 0 0 0 0 0 0 0 0 0 0 0
38254 - 0 0 0 0 0 0 0 0 0 0 0 0
38255 - 0 0 0 0 0 0 0 0 0 0 0 0
38256 - 0 0 0 0 0 0 0 0 0 0 0 0
38257 - 0 0 0 0 0 0 0 0 0 14 14 14
38258 - 46 46 46 86 86 86 2 2 6 14 14 14
38259 -134 134 134 198 198 198 195 195 195 116 116 116
38260 - 10 10 10 2 2 6 2 2 6 6 6 6
38261 -101 98 89 187 187 187 210 210 210 218 218 218
38262 -214 214 214 134 134 134 14 14 14 6 6 6
38263 - 2 2 6 2 2 6 2 2 6 2 2 6
38264 - 86 86 86 50 50 50 18 18 18 6 6 6
38265 - 0 0 0 0 0 0 0 0 0 0 0 0
38266 - 0 0 0 0 0 0 0 0 0 0 0 0
38267 - 0 0 0 0 0 0 0 0 0 0 0 0
38268 - 0 0 0 0 0 0 0 0 0 0 0 0
38269 - 0 0 0 0 0 0 0 0 0 0 0 0
38270 - 0 0 0 0 0 0 0 0 0 0 0 0
38271 - 0 0 0 0 0 0 0 0 1 0 0 0
38272 - 0 0 1 0 0 1 0 0 1 0 0 0
38273 - 0 0 0 0 0 0 0 0 0 0 0 0
38274 - 0 0 0 0 0 0 0 0 0 0 0 0
38275 - 0 0 0 0 0 0 0 0 0 0 0 0
38276 - 0 0 0 0 0 0 0 0 0 0 0 0
38277 - 0 0 0 0 0 0 0 0 0 14 14 14
38278 - 46 46 46 86 86 86 2 2 6 54 54 54
38279 -218 218 218 195 195 195 226 226 226 246 246 246
38280 - 58 58 58 2 2 6 2 2 6 30 30 30
38281 -210 210 210 253 253 253 174 174 174 123 123 123
38282 -221 221 221 234 234 234 74 74 74 2 2 6
38283 - 2 2 6 2 2 6 2 2 6 2 2 6
38284 - 70 70 70 58 58 58 22 22 22 6 6 6
38285 - 0 0 0 0 0 0 0 0 0 0 0 0
38286 - 0 0 0 0 0 0 0 0 0 0 0 0
38287 - 0 0 0 0 0 0 0 0 0 0 0 0
38288 - 0 0 0 0 0 0 0 0 0 0 0 0
38289 - 0 0 0 0 0 0 0 0 0 0 0 0
38290 - 0 0 0 0 0 0 0 0 0 0 0 0
38291 - 0 0 0 0 0 0 0 0 0 0 0 0
38292 - 0 0 0 0 0 0 0 0 0 0 0 0
38293 - 0 0 0 0 0 0 0 0 0 0 0 0
38294 - 0 0 0 0 0 0 0 0 0 0 0 0
38295 - 0 0 0 0 0 0 0 0 0 0 0 0
38296 - 0 0 0 0 0 0 0 0 0 0 0 0
38297 - 0 0 0 0 0 0 0 0 0 14 14 14
38298 - 46 46 46 82 82 82 2 2 6 106 106 106
38299 -170 170 170 26 26 26 86 86 86 226 226 226
38300 -123 123 123 10 10 10 14 14 14 46 46 46
38301 -231 231 231 190 190 190 6 6 6 70 70 70
38302 - 90 90 90 238 238 238 158 158 158 2 2 6
38303 - 2 2 6 2 2 6 2 2 6 2 2 6
38304 - 70 70 70 58 58 58 22 22 22 6 6 6
38305 - 0 0 0 0 0 0 0 0 0 0 0 0
38306 - 0 0 0 0 0 0 0 0 0 0 0 0
38307 - 0 0 0 0 0 0 0 0 0 0 0 0
38308 - 0 0 0 0 0 0 0 0 0 0 0 0
38309 - 0 0 0 0 0 0 0 0 0 0 0 0
38310 - 0 0 0 0 0 0 0 0 0 0 0 0
38311 - 0 0 0 0 0 0 0 0 1 0 0 0
38312 - 0 0 1 0 0 1 0 0 1 0 0 0
38313 - 0 0 0 0 0 0 0 0 0 0 0 0
38314 - 0 0 0 0 0 0 0 0 0 0 0 0
38315 - 0 0 0 0 0 0 0 0 0 0 0 0
38316 - 0 0 0 0 0 0 0 0 0 0 0 0
38317 - 0 0 0 0 0 0 0 0 0 14 14 14
38318 - 42 42 42 86 86 86 6 6 6 116 116 116
38319 -106 106 106 6 6 6 70 70 70 149 149 149
38320 -128 128 128 18 18 18 38 38 38 54 54 54
38321 -221 221 221 106 106 106 2 2 6 14 14 14
38322 - 46 46 46 190 190 190 198 198 198 2 2 6
38323 - 2 2 6 2 2 6 2 2 6 2 2 6
38324 - 74 74 74 62 62 62 22 22 22 6 6 6
38325 - 0 0 0 0 0 0 0 0 0 0 0 0
38326 - 0 0 0 0 0 0 0 0 0 0 0 0
38327 - 0 0 0 0 0 0 0 0 0 0 0 0
38328 - 0 0 0 0 0 0 0 0 0 0 0 0
38329 - 0 0 0 0 0 0 0 0 0 0 0 0
38330 - 0 0 0 0 0 0 0 0 0 0 0 0
38331 - 0 0 0 0 0 0 0 0 1 0 0 0
38332 - 0 0 1 0 0 0 0 0 1 0 0 0
38333 - 0 0 0 0 0 0 0 0 0 0 0 0
38334 - 0 0 0 0 0 0 0 0 0 0 0 0
38335 - 0 0 0 0 0 0 0 0 0 0 0 0
38336 - 0 0 0 0 0 0 0 0 0 0 0 0
38337 - 0 0 0 0 0 0 0 0 0 14 14 14
38338 - 42 42 42 94 94 94 14 14 14 101 101 101
38339 -128 128 128 2 2 6 18 18 18 116 116 116
38340 -118 98 46 121 92 8 121 92 8 98 78 10
38341 -162 162 162 106 106 106 2 2 6 2 2 6
38342 - 2 2 6 195 195 195 195 195 195 6 6 6
38343 - 2 2 6 2 2 6 2 2 6 2 2 6
38344 - 74 74 74 62 62 62 22 22 22 6 6 6
38345 - 0 0 0 0 0 0 0 0 0 0 0 0
38346 - 0 0 0 0 0 0 0 0 0 0 0 0
38347 - 0 0 0 0 0 0 0 0 0 0 0 0
38348 - 0 0 0 0 0 0 0 0 0 0 0 0
38349 - 0 0 0 0 0 0 0 0 0 0 0 0
38350 - 0 0 0 0 0 0 0 0 0 0 0 0
38351 - 0 0 0 0 0 0 0 0 1 0 0 1
38352 - 0 0 1 0 0 0 0 0 1 0 0 0
38353 - 0 0 0 0 0 0 0 0 0 0 0 0
38354 - 0 0 0 0 0 0 0 0 0 0 0 0
38355 - 0 0 0 0 0 0 0 0 0 0 0 0
38356 - 0 0 0 0 0 0 0 0 0 0 0 0
38357 - 0 0 0 0 0 0 0 0 0 10 10 10
38358 - 38 38 38 90 90 90 14 14 14 58 58 58
38359 -210 210 210 26 26 26 54 38 6 154 114 10
38360 -226 170 11 236 186 11 225 175 15 184 144 12
38361 -215 174 15 175 146 61 37 26 9 2 2 6
38362 - 70 70 70 246 246 246 138 138 138 2 2 6
38363 - 2 2 6 2 2 6 2 2 6 2 2 6
38364 - 70 70 70 66 66 66 26 26 26 6 6 6
38365 - 0 0 0 0 0 0 0 0 0 0 0 0
38366 - 0 0 0 0 0 0 0 0 0 0 0 0
38367 - 0 0 0 0 0 0 0 0 0 0 0 0
38368 - 0 0 0 0 0 0 0 0 0 0 0 0
38369 - 0 0 0 0 0 0 0 0 0 0 0 0
38370 - 0 0 0 0 0 0 0 0 0 0 0 0
38371 - 0 0 0 0 0 0 0 0 0 0 0 0
38372 - 0 0 0 0 0 0 0 0 0 0 0 0
38373 - 0 0 0 0 0 0 0 0 0 0 0 0
38374 - 0 0 0 0 0 0 0 0 0 0 0 0
38375 - 0 0 0 0 0 0 0 0 0 0 0 0
38376 - 0 0 0 0 0 0 0 0 0 0 0 0
38377 - 0 0 0 0 0 0 0 0 0 10 10 10
38378 - 38 38 38 86 86 86 14 14 14 10 10 10
38379 -195 195 195 188 164 115 192 133 9 225 175 15
38380 -239 182 13 234 190 10 232 195 16 232 200 30
38381 -245 207 45 241 208 19 232 195 16 184 144 12
38382 -218 194 134 211 206 186 42 42 42 2 2 6
38383 - 2 2 6 2 2 6 2 2 6 2 2 6
38384 - 50 50 50 74 74 74 30 30 30 6 6 6
38385 - 0 0 0 0 0 0 0 0 0 0 0 0
38386 - 0 0 0 0 0 0 0 0 0 0 0 0
38387 - 0 0 0 0 0 0 0 0 0 0 0 0
38388 - 0 0 0 0 0 0 0 0 0 0 0 0
38389 - 0 0 0 0 0 0 0 0 0 0 0 0
38390 - 0 0 0 0 0 0 0 0 0 0 0 0
38391 - 0 0 0 0 0 0 0 0 0 0 0 0
38392 - 0 0 0 0 0 0 0 0 0 0 0 0
38393 - 0 0 0 0 0 0 0 0 0 0 0 0
38394 - 0 0 0 0 0 0 0 0 0 0 0 0
38395 - 0 0 0 0 0 0 0 0 0 0 0 0
38396 - 0 0 0 0 0 0 0 0 0 0 0 0
38397 - 0 0 0 0 0 0 0 0 0 10 10 10
38398 - 34 34 34 86 86 86 14 14 14 2 2 6
38399 -121 87 25 192 133 9 219 162 10 239 182 13
38400 -236 186 11 232 195 16 241 208 19 244 214 54
38401 -246 218 60 246 218 38 246 215 20 241 208 19
38402 -241 208 19 226 184 13 121 87 25 2 2 6
38403 - 2 2 6 2 2 6 2 2 6 2 2 6
38404 - 50 50 50 82 82 82 34 34 34 10 10 10
38405 - 0 0 0 0 0 0 0 0 0 0 0 0
38406 - 0 0 0 0 0 0 0 0 0 0 0 0
38407 - 0 0 0 0 0 0 0 0 0 0 0 0
38408 - 0 0 0 0 0 0 0 0 0 0 0 0
38409 - 0 0 0 0 0 0 0 0 0 0 0 0
38410 - 0 0 0 0 0 0 0 0 0 0 0 0
38411 - 0 0 0 0 0 0 0 0 0 0 0 0
38412 - 0 0 0 0 0 0 0 0 0 0 0 0
38413 - 0 0 0 0 0 0 0 0 0 0 0 0
38414 - 0 0 0 0 0 0 0 0 0 0 0 0
38415 - 0 0 0 0 0 0 0 0 0 0 0 0
38416 - 0 0 0 0 0 0 0 0 0 0 0 0
38417 - 0 0 0 0 0 0 0 0 0 10 10 10
38418 - 34 34 34 82 82 82 30 30 30 61 42 6
38419 -180 123 7 206 145 10 230 174 11 239 182 13
38420 -234 190 10 238 202 15 241 208 19 246 218 74
38421 -246 218 38 246 215 20 246 215 20 246 215 20
38422 -226 184 13 215 174 15 184 144 12 6 6 6
38423 - 2 2 6 2 2 6 2 2 6 2 2 6
38424 - 26 26 26 94 94 94 42 42 42 14 14 14
38425 - 0 0 0 0 0 0 0 0 0 0 0 0
38426 - 0 0 0 0 0 0 0 0 0 0 0 0
38427 - 0 0 0 0 0 0 0 0 0 0 0 0
38428 - 0 0 0 0 0 0 0 0 0 0 0 0
38429 - 0 0 0 0 0 0 0 0 0 0 0 0
38430 - 0 0 0 0 0 0 0 0 0 0 0 0
38431 - 0 0 0 0 0 0 0 0 0 0 0 0
38432 - 0 0 0 0 0 0 0 0 0 0 0 0
38433 - 0 0 0 0 0 0 0 0 0 0 0 0
38434 - 0 0 0 0 0 0 0 0 0 0 0 0
38435 - 0 0 0 0 0 0 0 0 0 0 0 0
38436 - 0 0 0 0 0 0 0 0 0 0 0 0
38437 - 0 0 0 0 0 0 0 0 0 10 10 10
38438 - 30 30 30 78 78 78 50 50 50 104 69 6
38439 -192 133 9 216 158 10 236 178 12 236 186 11
38440 -232 195 16 241 208 19 244 214 54 245 215 43
38441 -246 215 20 246 215 20 241 208 19 198 155 10
38442 -200 144 11 216 158 10 156 118 10 2 2 6
38443 - 2 2 6 2 2 6 2 2 6 2 2 6
38444 - 6 6 6 90 90 90 54 54 54 18 18 18
38445 - 6 6 6 0 0 0 0 0 0 0 0 0
38446 - 0 0 0 0 0 0 0 0 0 0 0 0
38447 - 0 0 0 0 0 0 0 0 0 0 0 0
38448 - 0 0 0 0 0 0 0 0 0 0 0 0
38449 - 0 0 0 0 0 0 0 0 0 0 0 0
38450 - 0 0 0 0 0 0 0 0 0 0 0 0
38451 - 0 0 0 0 0 0 0 0 0 0 0 0
38452 - 0 0 0 0 0 0 0 0 0 0 0 0
38453 - 0 0 0 0 0 0 0 0 0 0 0 0
38454 - 0 0 0 0 0 0 0 0 0 0 0 0
38455 - 0 0 0 0 0 0 0 0 0 0 0 0
38456 - 0 0 0 0 0 0 0 0 0 0 0 0
38457 - 0 0 0 0 0 0 0 0 0 10 10 10
38458 - 30 30 30 78 78 78 46 46 46 22 22 22
38459 -137 92 6 210 162 10 239 182 13 238 190 10
38460 -238 202 15 241 208 19 246 215 20 246 215 20
38461 -241 208 19 203 166 17 185 133 11 210 150 10
38462 -216 158 10 210 150 10 102 78 10 2 2 6
38463 - 6 6 6 54 54 54 14 14 14 2 2 6
38464 - 2 2 6 62 62 62 74 74 74 30 30 30
38465 - 10 10 10 0 0 0 0 0 0 0 0 0
38466 - 0 0 0 0 0 0 0 0 0 0 0 0
38467 - 0 0 0 0 0 0 0 0 0 0 0 0
38468 - 0 0 0 0 0 0 0 0 0 0 0 0
38469 - 0 0 0 0 0 0 0 0 0 0 0 0
38470 - 0 0 0 0 0 0 0 0 0 0 0 0
38471 - 0 0 0 0 0 0 0 0 0 0 0 0
38472 - 0 0 0 0 0 0 0 0 0 0 0 0
38473 - 0 0 0 0 0 0 0 0 0 0 0 0
38474 - 0 0 0 0 0 0 0 0 0 0 0 0
38475 - 0 0 0 0 0 0 0 0 0 0 0 0
38476 - 0 0 0 0 0 0 0 0 0 0 0 0
38477 - 0 0 0 0 0 0 0 0 0 10 10 10
38478 - 34 34 34 78 78 78 50 50 50 6 6 6
38479 - 94 70 30 139 102 15 190 146 13 226 184 13
38480 -232 200 30 232 195 16 215 174 15 190 146 13
38481 -168 122 10 192 133 9 210 150 10 213 154 11
38482 -202 150 34 182 157 106 101 98 89 2 2 6
38483 - 2 2 6 78 78 78 116 116 116 58 58 58
38484 - 2 2 6 22 22 22 90 90 90 46 46 46
38485 - 18 18 18 6 6 6 0 0 0 0 0 0
38486 - 0 0 0 0 0 0 0 0 0 0 0 0
38487 - 0 0 0 0 0 0 0 0 0 0 0 0
38488 - 0 0 0 0 0 0 0 0 0 0 0 0
38489 - 0 0 0 0 0 0 0 0 0 0 0 0
38490 - 0 0 0 0 0 0 0 0 0 0 0 0
38491 - 0 0 0 0 0 0 0 0 0 0 0 0
38492 - 0 0 0 0 0 0 0 0 0 0 0 0
38493 - 0 0 0 0 0 0 0 0 0 0 0 0
38494 - 0 0 0 0 0 0 0 0 0 0 0 0
38495 - 0 0 0 0 0 0 0 0 0 0 0 0
38496 - 0 0 0 0 0 0 0 0 0 0 0 0
38497 - 0 0 0 0 0 0 0 0 0 10 10 10
38498 - 38 38 38 86 86 86 50 50 50 6 6 6
38499 -128 128 128 174 154 114 156 107 11 168 122 10
38500 -198 155 10 184 144 12 197 138 11 200 144 11
38501 -206 145 10 206 145 10 197 138 11 188 164 115
38502 -195 195 195 198 198 198 174 174 174 14 14 14
38503 - 2 2 6 22 22 22 116 116 116 116 116 116
38504 - 22 22 22 2 2 6 74 74 74 70 70 70
38505 - 30 30 30 10 10 10 0 0 0 0 0 0
38506 - 0 0 0 0 0 0 0 0 0 0 0 0
38507 - 0 0 0 0 0 0 0 0 0 0 0 0
38508 - 0 0 0 0 0 0 0 0 0 0 0 0
38509 - 0 0 0 0 0 0 0 0 0 0 0 0
38510 - 0 0 0 0 0 0 0 0 0 0 0 0
38511 - 0 0 0 0 0 0 0 0 0 0 0 0
38512 - 0 0 0 0 0 0 0 0 0 0 0 0
38513 - 0 0 0 0 0 0 0 0 0 0 0 0
38514 - 0 0 0 0 0 0 0 0 0 0 0 0
38515 - 0 0 0 0 0 0 0 0 0 0 0 0
38516 - 0 0 0 0 0 0 0 0 0 0 0 0
38517 - 0 0 0 0 0 0 6 6 6 18 18 18
38518 - 50 50 50 101 101 101 26 26 26 10 10 10
38519 -138 138 138 190 190 190 174 154 114 156 107 11
38520 -197 138 11 200 144 11 197 138 11 192 133 9
38521 -180 123 7 190 142 34 190 178 144 187 187 187
38522 -202 202 202 221 221 221 214 214 214 66 66 66
38523 - 2 2 6 2 2 6 50 50 50 62 62 62
38524 - 6 6 6 2 2 6 10 10 10 90 90 90
38525 - 50 50 50 18 18 18 6 6 6 0 0 0
38526 - 0 0 0 0 0 0 0 0 0 0 0 0
38527 - 0 0 0 0 0 0 0 0 0 0 0 0
38528 - 0 0 0 0 0 0 0 0 0 0 0 0
38529 - 0 0 0 0 0 0 0 0 0 0 0 0
38530 - 0 0 0 0 0 0 0 0 0 0 0 0
38531 - 0 0 0 0 0 0 0 0 0 0 0 0
38532 - 0 0 0 0 0 0 0 0 0 0 0 0
38533 - 0 0 0 0 0 0 0 0 0 0 0 0
38534 - 0 0 0 0 0 0 0 0 0 0 0 0
38535 - 0 0 0 0 0 0 0 0 0 0 0 0
38536 - 0 0 0 0 0 0 0 0 0 0 0 0
38537 - 0 0 0 0 0 0 10 10 10 34 34 34
38538 - 74 74 74 74 74 74 2 2 6 6 6 6
38539 -144 144 144 198 198 198 190 190 190 178 166 146
38540 -154 121 60 156 107 11 156 107 11 168 124 44
38541 -174 154 114 187 187 187 190 190 190 210 210 210
38542 -246 246 246 253 253 253 253 253 253 182 182 182
38543 - 6 6 6 2 2 6 2 2 6 2 2 6
38544 - 2 2 6 2 2 6 2 2 6 62 62 62
38545 - 74 74 74 34 34 34 14 14 14 0 0 0
38546 - 0 0 0 0 0 0 0 0 0 0 0 0
38547 - 0 0 0 0 0 0 0 0 0 0 0 0
38548 - 0 0 0 0 0 0 0 0 0 0 0 0
38549 - 0 0 0 0 0 0 0 0 0 0 0 0
38550 - 0 0 0 0 0 0 0 0 0 0 0 0
38551 - 0 0 0 0 0 0 0 0 0 0 0 0
38552 - 0 0 0 0 0 0 0 0 0 0 0 0
38553 - 0 0 0 0 0 0 0 0 0 0 0 0
38554 - 0 0 0 0 0 0 0 0 0 0 0 0
38555 - 0 0 0 0 0 0 0 0 0 0 0 0
38556 - 0 0 0 0 0 0 0 0 0 0 0 0
38557 - 0 0 0 10 10 10 22 22 22 54 54 54
38558 - 94 94 94 18 18 18 2 2 6 46 46 46
38559 -234 234 234 221 221 221 190 190 190 190 190 190
38560 -190 190 190 187 187 187 187 187 187 190 190 190
38561 -190 190 190 195 195 195 214 214 214 242 242 242
38562 -253 253 253 253 253 253 253 253 253 253 253 253
38563 - 82 82 82 2 2 6 2 2 6 2 2 6
38564 - 2 2 6 2 2 6 2 2 6 14 14 14
38565 - 86 86 86 54 54 54 22 22 22 6 6 6
38566 - 0 0 0 0 0 0 0 0 0 0 0 0
38567 - 0 0 0 0 0 0 0 0 0 0 0 0
38568 - 0 0 0 0 0 0 0 0 0 0 0 0
38569 - 0 0 0 0 0 0 0 0 0 0 0 0
38570 - 0 0 0 0 0 0 0 0 0 0 0 0
38571 - 0 0 0 0 0 0 0 0 0 0 0 0
38572 - 0 0 0 0 0 0 0 0 0 0 0 0
38573 - 0 0 0 0 0 0 0 0 0 0 0 0
38574 - 0 0 0 0 0 0 0 0 0 0 0 0
38575 - 0 0 0 0 0 0 0 0 0 0 0 0
38576 - 0 0 0 0 0 0 0 0 0 0 0 0
38577 - 6 6 6 18 18 18 46 46 46 90 90 90
38578 - 46 46 46 18 18 18 6 6 6 182 182 182
38579 -253 253 253 246 246 246 206 206 206 190 190 190
38580 -190 190 190 190 190 190 190 190 190 190 190 190
38581 -206 206 206 231 231 231 250 250 250 253 253 253
38582 -253 253 253 253 253 253 253 253 253 253 253 253
38583 -202 202 202 14 14 14 2 2 6 2 2 6
38584 - 2 2 6 2 2 6 2 2 6 2 2 6
38585 - 42 42 42 86 86 86 42 42 42 18 18 18
38586 - 6 6 6 0 0 0 0 0 0 0 0 0
38587 - 0 0 0 0 0 0 0 0 0 0 0 0
38588 - 0 0 0 0 0 0 0 0 0 0 0 0
38589 - 0 0 0 0 0 0 0 0 0 0 0 0
38590 - 0 0 0 0 0 0 0 0 0 0 0 0
38591 - 0 0 0 0 0 0 0 0 0 0 0 0
38592 - 0 0 0 0 0 0 0 0 0 0 0 0
38593 - 0 0 0 0 0 0 0 0 0 0 0 0
38594 - 0 0 0 0 0 0 0 0 0 0 0 0
38595 - 0 0 0 0 0 0 0 0 0 0 0 0
38596 - 0 0 0 0 0 0 0 0 0 6 6 6
38597 - 14 14 14 38 38 38 74 74 74 66 66 66
38598 - 2 2 6 6 6 6 90 90 90 250 250 250
38599 -253 253 253 253 253 253 238 238 238 198 198 198
38600 -190 190 190 190 190 190 195 195 195 221 221 221
38601 -246 246 246 253 253 253 253 253 253 253 253 253
38602 -253 253 253 253 253 253 253 253 253 253 253 253
38603 -253 253 253 82 82 82 2 2 6 2 2 6
38604 - 2 2 6 2 2 6 2 2 6 2 2 6
38605 - 2 2 6 78 78 78 70 70 70 34 34 34
38606 - 14 14 14 6 6 6 0 0 0 0 0 0
38607 - 0 0 0 0 0 0 0 0 0 0 0 0
38608 - 0 0 0 0 0 0 0 0 0 0 0 0
38609 - 0 0 0 0 0 0 0 0 0 0 0 0
38610 - 0 0 0 0 0 0 0 0 0 0 0 0
38611 - 0 0 0 0 0 0 0 0 0 0 0 0
38612 - 0 0 0 0 0 0 0 0 0 0 0 0
38613 - 0 0 0 0 0 0 0 0 0 0 0 0
38614 - 0 0 0 0 0 0 0 0 0 0 0 0
38615 - 0 0 0 0 0 0 0 0 0 0 0 0
38616 - 0 0 0 0 0 0 0 0 0 14 14 14
38617 - 34 34 34 66 66 66 78 78 78 6 6 6
38618 - 2 2 6 18 18 18 218 218 218 253 253 253
38619 -253 253 253 253 253 253 253 253 253 246 246 246
38620 -226 226 226 231 231 231 246 246 246 253 253 253
38621 -253 253 253 253 253 253 253 253 253 253 253 253
38622 -253 253 253 253 253 253 253 253 253 253 253 253
38623 -253 253 253 178 178 178 2 2 6 2 2 6
38624 - 2 2 6 2 2 6 2 2 6 2 2 6
38625 - 2 2 6 18 18 18 90 90 90 62 62 62
38626 - 30 30 30 10 10 10 0 0 0 0 0 0
38627 - 0 0 0 0 0 0 0 0 0 0 0 0
38628 - 0 0 0 0 0 0 0 0 0 0 0 0
38629 - 0 0 0 0 0 0 0 0 0 0 0 0
38630 - 0 0 0 0 0 0 0 0 0 0 0 0
38631 - 0 0 0 0 0 0 0 0 0 0 0 0
38632 - 0 0 0 0 0 0 0 0 0 0 0 0
38633 - 0 0 0 0 0 0 0 0 0 0 0 0
38634 - 0 0 0 0 0 0 0 0 0 0 0 0
38635 - 0 0 0 0 0 0 0 0 0 0 0 0
38636 - 0 0 0 0 0 0 10 10 10 26 26 26
38637 - 58 58 58 90 90 90 18 18 18 2 2 6
38638 - 2 2 6 110 110 110 253 253 253 253 253 253
38639 -253 253 253 253 253 253 253 253 253 253 253 253
38640 -250 250 250 253 253 253 253 253 253 253 253 253
38641 -253 253 253 253 253 253 253 253 253 253 253 253
38642 -253 253 253 253 253 253 253 253 253 253 253 253
38643 -253 253 253 231 231 231 18 18 18 2 2 6
38644 - 2 2 6 2 2 6 2 2 6 2 2 6
38645 - 2 2 6 2 2 6 18 18 18 94 94 94
38646 - 54 54 54 26 26 26 10 10 10 0 0 0
38647 - 0 0 0 0 0 0 0 0 0 0 0 0
38648 - 0 0 0 0 0 0 0 0 0 0 0 0
38649 - 0 0 0 0 0 0 0 0 0 0 0 0
38650 - 0 0 0 0 0 0 0 0 0 0 0 0
38651 - 0 0 0 0 0 0 0 0 0 0 0 0
38652 - 0 0 0 0 0 0 0 0 0 0 0 0
38653 - 0 0 0 0 0 0 0 0 0 0 0 0
38654 - 0 0 0 0 0 0 0 0 0 0 0 0
38655 - 0 0 0 0 0 0 0 0 0 0 0 0
38656 - 0 0 0 6 6 6 22 22 22 50 50 50
38657 - 90 90 90 26 26 26 2 2 6 2 2 6
38658 - 14 14 14 195 195 195 250 250 250 253 253 253
38659 -253 253 253 253 253 253 253 253 253 253 253 253
38660 -253 253 253 253 253 253 253 253 253 253 253 253
38661 -253 253 253 253 253 253 253 253 253 253 253 253
38662 -253 253 253 253 253 253 253 253 253 253 253 253
38663 -250 250 250 242 242 242 54 54 54 2 2 6
38664 - 2 2 6 2 2 6 2 2 6 2 2 6
38665 - 2 2 6 2 2 6 2 2 6 38 38 38
38666 - 86 86 86 50 50 50 22 22 22 6 6 6
38667 - 0 0 0 0 0 0 0 0 0 0 0 0
38668 - 0 0 0 0 0 0 0 0 0 0 0 0
38669 - 0 0 0 0 0 0 0 0 0 0 0 0
38670 - 0 0 0 0 0 0 0 0 0 0 0 0
38671 - 0 0 0 0 0 0 0 0 0 0 0 0
38672 - 0 0 0 0 0 0 0 0 0 0 0 0
38673 - 0 0 0 0 0 0 0 0 0 0 0 0
38674 - 0 0 0 0 0 0 0 0 0 0 0 0
38675 - 0 0 0 0 0 0 0 0 0 0 0 0
38676 - 6 6 6 14 14 14 38 38 38 82 82 82
38677 - 34 34 34 2 2 6 2 2 6 2 2 6
38678 - 42 42 42 195 195 195 246 246 246 253 253 253
38679 -253 253 253 253 253 253 253 253 253 250 250 250
38680 -242 242 242 242 242 242 250 250 250 253 253 253
38681 -253 253 253 253 253 253 253 253 253 253 253 253
38682 -253 253 253 250 250 250 246 246 246 238 238 238
38683 -226 226 226 231 231 231 101 101 101 6 6 6
38684 - 2 2 6 2 2 6 2 2 6 2 2 6
38685 - 2 2 6 2 2 6 2 2 6 2 2 6
38686 - 38 38 38 82 82 82 42 42 42 14 14 14
38687 - 6 6 6 0 0 0 0 0 0 0 0 0
38688 - 0 0 0 0 0 0 0 0 0 0 0 0
38689 - 0 0 0 0 0 0 0 0 0 0 0 0
38690 - 0 0 0 0 0 0 0 0 0 0 0 0
38691 - 0 0 0 0 0 0 0 0 0 0 0 0
38692 - 0 0 0 0 0 0 0 0 0 0 0 0
38693 - 0 0 0 0 0 0 0 0 0 0 0 0
38694 - 0 0 0 0 0 0 0 0 0 0 0 0
38695 - 0 0 0 0 0 0 0 0 0 0 0 0
38696 - 10 10 10 26 26 26 62 62 62 66 66 66
38697 - 2 2 6 2 2 6 2 2 6 6 6 6
38698 - 70 70 70 170 170 170 206 206 206 234 234 234
38699 -246 246 246 250 250 250 250 250 250 238 238 238
38700 -226 226 226 231 231 231 238 238 238 250 250 250
38701 -250 250 250 250 250 250 246 246 246 231 231 231
38702 -214 214 214 206 206 206 202 202 202 202 202 202
38703 -198 198 198 202 202 202 182 182 182 18 18 18
38704 - 2 2 6 2 2 6 2 2 6 2 2 6
38705 - 2 2 6 2 2 6 2 2 6 2 2 6
38706 - 2 2 6 62 62 62 66 66 66 30 30 30
38707 - 10 10 10 0 0 0 0 0 0 0 0 0
38708 - 0 0 0 0 0 0 0 0 0 0 0 0
38709 - 0 0 0 0 0 0 0 0 0 0 0 0
38710 - 0 0 0 0 0 0 0 0 0 0 0 0
38711 - 0 0 0 0 0 0 0 0 0 0 0 0
38712 - 0 0 0 0 0 0 0 0 0 0 0 0
38713 - 0 0 0 0 0 0 0 0 0 0 0 0
38714 - 0 0 0 0 0 0 0 0 0 0 0 0
38715 - 0 0 0 0 0 0 0 0 0 0 0 0
38716 - 14 14 14 42 42 42 82 82 82 18 18 18
38717 - 2 2 6 2 2 6 2 2 6 10 10 10
38718 - 94 94 94 182 182 182 218 218 218 242 242 242
38719 -250 250 250 253 253 253 253 253 253 250 250 250
38720 -234 234 234 253 253 253 253 253 253 253 253 253
38721 -253 253 253 253 253 253 253 253 253 246 246 246
38722 -238 238 238 226 226 226 210 210 210 202 202 202
38723 -195 195 195 195 195 195 210 210 210 158 158 158
38724 - 6 6 6 14 14 14 50 50 50 14 14 14
38725 - 2 2 6 2 2 6 2 2 6 2 2 6
38726 - 2 2 6 6 6 6 86 86 86 46 46 46
38727 - 18 18 18 6 6 6 0 0 0 0 0 0
38728 - 0 0 0 0 0 0 0 0 0 0 0 0
38729 - 0 0 0 0 0 0 0 0 0 0 0 0
38730 - 0 0 0 0 0 0 0 0 0 0 0 0
38731 - 0 0 0 0 0 0 0 0 0 0 0 0
38732 - 0 0 0 0 0 0 0 0 0 0 0 0
38733 - 0 0 0 0 0 0 0 0 0 0 0 0
38734 - 0 0 0 0 0 0 0 0 0 0 0 0
38735 - 0 0 0 0 0 0 0 0 0 6 6 6
38736 - 22 22 22 54 54 54 70 70 70 2 2 6
38737 - 2 2 6 10 10 10 2 2 6 22 22 22
38738 -166 166 166 231 231 231 250 250 250 253 253 253
38739 -253 253 253 253 253 253 253 253 253 250 250 250
38740 -242 242 242 253 253 253 253 253 253 253 253 253
38741 -253 253 253 253 253 253 253 253 253 253 253 253
38742 -253 253 253 253 253 253 253 253 253 246 246 246
38743 -231 231 231 206 206 206 198 198 198 226 226 226
38744 - 94 94 94 2 2 6 6 6 6 38 38 38
38745 - 30 30 30 2 2 6 2 2 6 2 2 6
38746 - 2 2 6 2 2 6 62 62 62 66 66 66
38747 - 26 26 26 10 10 10 0 0 0 0 0 0
38748 - 0 0 0 0 0 0 0 0 0 0 0 0
38749 - 0 0 0 0 0 0 0 0 0 0 0 0
38750 - 0 0 0 0 0 0 0 0 0 0 0 0
38751 - 0 0 0 0 0 0 0 0 0 0 0 0
38752 - 0 0 0 0 0 0 0 0 0 0 0 0
38753 - 0 0 0 0 0 0 0 0 0 0 0 0
38754 - 0 0 0 0 0 0 0 0 0 0 0 0
38755 - 0 0 0 0 0 0 0 0 0 10 10 10
38756 - 30 30 30 74 74 74 50 50 50 2 2 6
38757 - 26 26 26 26 26 26 2 2 6 106 106 106
38758 -238 238 238 253 253 253 253 253 253 253 253 253
38759 -253 253 253 253 253 253 253 253 253 253 253 253
38760 -253 253 253 253 253 253 253 253 253 253 253 253
38761 -253 253 253 253 253 253 253 253 253 253 253 253
38762 -253 253 253 253 253 253 253 253 253 253 253 253
38763 -253 253 253 246 246 246 218 218 218 202 202 202
38764 -210 210 210 14 14 14 2 2 6 2 2 6
38765 - 30 30 30 22 22 22 2 2 6 2 2 6
38766 - 2 2 6 2 2 6 18 18 18 86 86 86
38767 - 42 42 42 14 14 14 0 0 0 0 0 0
38768 - 0 0 0 0 0 0 0 0 0 0 0 0
38769 - 0 0 0 0 0 0 0 0 0 0 0 0
38770 - 0 0 0 0 0 0 0 0 0 0 0 0
38771 - 0 0 0 0 0 0 0 0 0 0 0 0
38772 - 0 0 0 0 0 0 0 0 0 0 0 0
38773 - 0 0 0 0 0 0 0 0 0 0 0 0
38774 - 0 0 0 0 0 0 0 0 0 0 0 0
38775 - 0 0 0 0 0 0 0 0 0 14 14 14
38776 - 42 42 42 90 90 90 22 22 22 2 2 6
38777 - 42 42 42 2 2 6 18 18 18 218 218 218
38778 -253 253 253 253 253 253 253 253 253 253 253 253
38779 -253 253 253 253 253 253 253 253 253 253 253 253
38780 -253 253 253 253 253 253 253 253 253 253 253 253
38781 -253 253 253 253 253 253 253 253 253 253 253 253
38782 -253 253 253 253 253 253 253 253 253 253 253 253
38783 -253 253 253 253 253 253 250 250 250 221 221 221
38784 -218 218 218 101 101 101 2 2 6 14 14 14
38785 - 18 18 18 38 38 38 10 10 10 2 2 6
38786 - 2 2 6 2 2 6 2 2 6 78 78 78
38787 - 58 58 58 22 22 22 6 6 6 0 0 0
38788 - 0 0 0 0 0 0 0 0 0 0 0 0
38789 - 0 0 0 0 0 0 0 0 0 0 0 0
38790 - 0 0 0 0 0 0 0 0 0 0 0 0
38791 - 0 0 0 0 0 0 0 0 0 0 0 0
38792 - 0 0 0 0 0 0 0 0 0 0 0 0
38793 - 0 0 0 0 0 0 0 0 0 0 0 0
38794 - 0 0 0 0 0 0 0 0 0 0 0 0
38795 - 0 0 0 0 0 0 6 6 6 18 18 18
38796 - 54 54 54 82 82 82 2 2 6 26 26 26
38797 - 22 22 22 2 2 6 123 123 123 253 253 253
38798 -253 253 253 253 253 253 253 253 253 253 253 253
38799 -253 253 253 253 253 253 253 253 253 253 253 253
38800 -253 253 253 253 253 253 253 253 253 253 253 253
38801 -253 253 253 253 253 253 253 253 253 253 253 253
38802 -253 253 253 253 253 253 253 253 253 253 253 253
38803 -253 253 253 253 253 253 253 253 253 250 250 250
38804 -238 238 238 198 198 198 6 6 6 38 38 38
38805 - 58 58 58 26 26 26 38 38 38 2 2 6
38806 - 2 2 6 2 2 6 2 2 6 46 46 46
38807 - 78 78 78 30 30 30 10 10 10 0 0 0
38808 - 0 0 0 0 0 0 0 0 0 0 0 0
38809 - 0 0 0 0 0 0 0 0 0 0 0 0
38810 - 0 0 0 0 0 0 0 0 0 0 0 0
38811 - 0 0 0 0 0 0 0 0 0 0 0 0
38812 - 0 0 0 0 0 0 0 0 0 0 0 0
38813 - 0 0 0 0 0 0 0 0 0 0 0 0
38814 - 0 0 0 0 0 0 0 0 0 0 0 0
38815 - 0 0 0 0 0 0 10 10 10 30 30 30
38816 - 74 74 74 58 58 58 2 2 6 42 42 42
38817 - 2 2 6 22 22 22 231 231 231 253 253 253
38818 -253 253 253 253 253 253 253 253 253 253 253 253
38819 -253 253 253 253 253 253 253 253 253 250 250 250
38820 -253 253 253 253 253 253 253 253 253 253 253 253
38821 -253 253 253 253 253 253 253 253 253 253 253 253
38822 -253 253 253 253 253 253 253 253 253 253 253 253
38823 -253 253 253 253 253 253 253 253 253 253 253 253
38824 -253 253 253 246 246 246 46 46 46 38 38 38
38825 - 42 42 42 14 14 14 38 38 38 14 14 14
38826 - 2 2 6 2 2 6 2 2 6 6 6 6
38827 - 86 86 86 46 46 46 14 14 14 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 0 0 0
38830 - 0 0 0 0 0 0 0 0 0 0 0 0
38831 - 0 0 0 0 0 0 0 0 0 0 0 0
38832 - 0 0 0 0 0 0 0 0 0 0 0 0
38833 - 0 0 0 0 0 0 0 0 0 0 0 0
38834 - 0 0 0 0 0 0 0 0 0 0 0 0
38835 - 0 0 0 6 6 6 14 14 14 42 42 42
38836 - 90 90 90 18 18 18 18 18 18 26 26 26
38837 - 2 2 6 116 116 116 253 253 253 253 253 253
38838 -253 253 253 253 253 253 253 253 253 253 253 253
38839 -253 253 253 253 253 253 250 250 250 238 238 238
38840 -253 253 253 253 253 253 253 253 253 253 253 253
38841 -253 253 253 253 253 253 253 253 253 253 253 253
38842 -253 253 253 253 253 253 253 253 253 253 253 253
38843 -253 253 253 253 253 253 253 253 253 253 253 253
38844 -253 253 253 253 253 253 94 94 94 6 6 6
38845 - 2 2 6 2 2 6 10 10 10 34 34 34
38846 - 2 2 6 2 2 6 2 2 6 2 2 6
38847 - 74 74 74 58 58 58 22 22 22 6 6 6
38848 - 0 0 0 0 0 0 0 0 0 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 0 0 0
38850 - 0 0 0 0 0 0 0 0 0 0 0 0
38851 - 0 0 0 0 0 0 0 0 0 0 0 0
38852 - 0 0 0 0 0 0 0 0 0 0 0 0
38853 - 0 0 0 0 0 0 0 0 0 0 0 0
38854 - 0 0 0 0 0 0 0 0 0 0 0 0
38855 - 0 0 0 10 10 10 26 26 26 66 66 66
38856 - 82 82 82 2 2 6 38 38 38 6 6 6
38857 - 14 14 14 210 210 210 253 253 253 253 253 253
38858 -253 253 253 253 253 253 253 253 253 253 253 253
38859 -253 253 253 253 253 253 246 246 246 242 242 242
38860 -253 253 253 253 253 253 253 253 253 253 253 253
38861 -253 253 253 253 253 253 253 253 253 253 253 253
38862 -253 253 253 253 253 253 253 253 253 253 253 253
38863 -253 253 253 253 253 253 253 253 253 253 253 253
38864 -253 253 253 253 253 253 144 144 144 2 2 6
38865 - 2 2 6 2 2 6 2 2 6 46 46 46
38866 - 2 2 6 2 2 6 2 2 6 2 2 6
38867 - 42 42 42 74 74 74 30 30 30 10 10 10
38868 - 0 0 0 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 0 0 0 0 0 0
38870 - 0 0 0 0 0 0 0 0 0 0 0 0
38871 - 0 0 0 0 0 0 0 0 0 0 0 0
38872 - 0 0 0 0 0 0 0 0 0 0 0 0
38873 - 0 0 0 0 0 0 0 0 0 0 0 0
38874 - 0 0 0 0 0 0 0 0 0 0 0 0
38875 - 6 6 6 14 14 14 42 42 42 90 90 90
38876 - 26 26 26 6 6 6 42 42 42 2 2 6
38877 - 74 74 74 250 250 250 253 253 253 253 253 253
38878 -253 253 253 253 253 253 253 253 253 253 253 253
38879 -253 253 253 253 253 253 242 242 242 242 242 242
38880 -253 253 253 253 253 253 253 253 253 253 253 253
38881 -253 253 253 253 253 253 253 253 253 253 253 253
38882 -253 253 253 253 253 253 253 253 253 253 253 253
38883 -253 253 253 253 253 253 253 253 253 253 253 253
38884 -253 253 253 253 253 253 182 182 182 2 2 6
38885 - 2 2 6 2 2 6 2 2 6 46 46 46
38886 - 2 2 6 2 2 6 2 2 6 2 2 6
38887 - 10 10 10 86 86 86 38 38 38 10 10 10
38888 - 0 0 0 0 0 0 0 0 0 0 0 0
38889 - 0 0 0 0 0 0 0 0 0 0 0 0
38890 - 0 0 0 0 0 0 0 0 0 0 0 0
38891 - 0 0 0 0 0 0 0 0 0 0 0 0
38892 - 0 0 0 0 0 0 0 0 0 0 0 0
38893 - 0 0 0 0 0 0 0 0 0 0 0 0
38894 - 0 0 0 0 0 0 0 0 0 0 0 0
38895 - 10 10 10 26 26 26 66 66 66 82 82 82
38896 - 2 2 6 22 22 22 18 18 18 2 2 6
38897 -149 149 149 253 253 253 253 253 253 253 253 253
38898 -253 253 253 253 253 253 253 253 253 253 253 253
38899 -253 253 253 253 253 253 234 234 234 242 242 242
38900 -253 253 253 253 253 253 253 253 253 253 253 253
38901 -253 253 253 253 253 253 253 253 253 253 253 253
38902 -253 253 253 253 253 253 253 253 253 253 253 253
38903 -253 253 253 253 253 253 253 253 253 253 253 253
38904 -253 253 253 253 253 253 206 206 206 2 2 6
38905 - 2 2 6 2 2 6 2 2 6 38 38 38
38906 - 2 2 6 2 2 6 2 2 6 2 2 6
38907 - 6 6 6 86 86 86 46 46 46 14 14 14
38908 - 0 0 0 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 0 0 0 0 0 0
38910 - 0 0 0 0 0 0 0 0 0 0 0 0
38911 - 0 0 0 0 0 0 0 0 0 0 0 0
38912 - 0 0 0 0 0 0 0 0 0 0 0 0
38913 - 0 0 0 0 0 0 0 0 0 0 0 0
38914 - 0 0 0 0 0 0 0 0 0 6 6 6
38915 - 18 18 18 46 46 46 86 86 86 18 18 18
38916 - 2 2 6 34 34 34 10 10 10 6 6 6
38917 -210 210 210 253 253 253 253 253 253 253 253 253
38918 -253 253 253 253 253 253 253 253 253 253 253 253
38919 -253 253 253 253 253 253 234 234 234 242 242 242
38920 -253 253 253 253 253 253 253 253 253 253 253 253
38921 -253 253 253 253 253 253 253 253 253 253 253 253
38922 -253 253 253 253 253 253 253 253 253 253 253 253
38923 -253 253 253 253 253 253 253 253 253 253 253 253
38924 -253 253 253 253 253 253 221 221 221 6 6 6
38925 - 2 2 6 2 2 6 6 6 6 30 30 30
38926 - 2 2 6 2 2 6 2 2 6 2 2 6
38927 - 2 2 6 82 82 82 54 54 54 18 18 18
38928 - 6 6 6 0 0 0 0 0 0 0 0 0
38929 - 0 0 0 0 0 0 0 0 0 0 0 0
38930 - 0 0 0 0 0 0 0 0 0 0 0 0
38931 - 0 0 0 0 0 0 0 0 0 0 0 0
38932 - 0 0 0 0 0 0 0 0 0 0 0 0
38933 - 0 0 0 0 0 0 0 0 0 0 0 0
38934 - 0 0 0 0 0 0 0 0 0 10 10 10
38935 - 26 26 26 66 66 66 62 62 62 2 2 6
38936 - 2 2 6 38 38 38 10 10 10 26 26 26
38937 -238 238 238 253 253 253 253 253 253 253 253 253
38938 -253 253 253 253 253 253 253 253 253 253 253 253
38939 -253 253 253 253 253 253 231 231 231 238 238 238
38940 -253 253 253 253 253 253 253 253 253 253 253 253
38941 -253 253 253 253 253 253 253 253 253 253 253 253
38942 -253 253 253 253 253 253 253 253 253 253 253 253
38943 -253 253 253 253 253 253 253 253 253 253 253 253
38944 -253 253 253 253 253 253 231 231 231 6 6 6
38945 - 2 2 6 2 2 6 10 10 10 30 30 30
38946 - 2 2 6 2 2 6 2 2 6 2 2 6
38947 - 2 2 6 66 66 66 58 58 58 22 22 22
38948 - 6 6 6 0 0 0 0 0 0 0 0 0
38949 - 0 0 0 0 0 0 0 0 0 0 0 0
38950 - 0 0 0 0 0 0 0 0 0 0 0 0
38951 - 0 0 0 0 0 0 0 0 0 0 0 0
38952 - 0 0 0 0 0 0 0 0 0 0 0 0
38953 - 0 0 0 0 0 0 0 0 0 0 0 0
38954 - 0 0 0 0 0 0 0 0 0 10 10 10
38955 - 38 38 38 78 78 78 6 6 6 2 2 6
38956 - 2 2 6 46 46 46 14 14 14 42 42 42
38957 -246 246 246 253 253 253 253 253 253 253 253 253
38958 -253 253 253 253 253 253 253 253 253 253 253 253
38959 -253 253 253 253 253 253 231 231 231 242 242 242
38960 -253 253 253 253 253 253 253 253 253 253 253 253
38961 -253 253 253 253 253 253 253 253 253 253 253 253
38962 -253 253 253 253 253 253 253 253 253 253 253 253
38963 -253 253 253 253 253 253 253 253 253 253 253 253
38964 -253 253 253 253 253 253 234 234 234 10 10 10
38965 - 2 2 6 2 2 6 22 22 22 14 14 14
38966 - 2 2 6 2 2 6 2 2 6 2 2 6
38967 - 2 2 6 66 66 66 62 62 62 22 22 22
38968 - 6 6 6 0 0 0 0 0 0 0 0 0
38969 - 0 0 0 0 0 0 0 0 0 0 0 0
38970 - 0 0 0 0 0 0 0 0 0 0 0 0
38971 - 0 0 0 0 0 0 0 0 0 0 0 0
38972 - 0 0 0 0 0 0 0 0 0 0 0 0
38973 - 0 0 0 0 0 0 0 0 0 0 0 0
38974 - 0 0 0 0 0 0 6 6 6 18 18 18
38975 - 50 50 50 74 74 74 2 2 6 2 2 6
38976 - 14 14 14 70 70 70 34 34 34 62 62 62
38977 -250 250 250 253 253 253 253 253 253 253 253 253
38978 -253 253 253 253 253 253 253 253 253 253 253 253
38979 -253 253 253 253 253 253 231 231 231 246 246 246
38980 -253 253 253 253 253 253 253 253 253 253 253 253
38981 -253 253 253 253 253 253 253 253 253 253 253 253
38982 -253 253 253 253 253 253 253 253 253 253 253 253
38983 -253 253 253 253 253 253 253 253 253 253 253 253
38984 -253 253 253 253 253 253 234 234 234 14 14 14
38985 - 2 2 6 2 2 6 30 30 30 2 2 6
38986 - 2 2 6 2 2 6 2 2 6 2 2 6
38987 - 2 2 6 66 66 66 62 62 62 22 22 22
38988 - 6 6 6 0 0 0 0 0 0 0 0 0
38989 - 0 0 0 0 0 0 0 0 0 0 0 0
38990 - 0 0 0 0 0 0 0 0 0 0 0 0
38991 - 0 0 0 0 0 0 0 0 0 0 0 0
38992 - 0 0 0 0 0 0 0 0 0 0 0 0
38993 - 0 0 0 0 0 0 0 0 0 0 0 0
38994 - 0 0 0 0 0 0 6 6 6 18 18 18
38995 - 54 54 54 62 62 62 2 2 6 2 2 6
38996 - 2 2 6 30 30 30 46 46 46 70 70 70
38997 -250 250 250 253 253 253 253 253 253 253 253 253
38998 -253 253 253 253 253 253 253 253 253 253 253 253
38999 -253 253 253 253 253 253 231 231 231 246 246 246
39000 -253 253 253 253 253 253 253 253 253 253 253 253
39001 -253 253 253 253 253 253 253 253 253 253 253 253
39002 -253 253 253 253 253 253 253 253 253 253 253 253
39003 -253 253 253 253 253 253 253 253 253 253 253 253
39004 -253 253 253 253 253 253 226 226 226 10 10 10
39005 - 2 2 6 6 6 6 30 30 30 2 2 6
39006 - 2 2 6 2 2 6 2 2 6 2 2 6
39007 - 2 2 6 66 66 66 58 58 58 22 22 22
39008 - 6 6 6 0 0 0 0 0 0 0 0 0
39009 - 0 0 0 0 0 0 0 0 0 0 0 0
39010 - 0 0 0 0 0 0 0 0 0 0 0 0
39011 - 0 0 0 0 0 0 0 0 0 0 0 0
39012 - 0 0 0 0 0 0 0 0 0 0 0 0
39013 - 0 0 0 0 0 0 0 0 0 0 0 0
39014 - 0 0 0 0 0 0 6 6 6 22 22 22
39015 - 58 58 58 62 62 62 2 2 6 2 2 6
39016 - 2 2 6 2 2 6 30 30 30 78 78 78
39017 -250 250 250 253 253 253 253 253 253 253 253 253
39018 -253 253 253 253 253 253 253 253 253 253 253 253
39019 -253 253 253 253 253 253 231 231 231 246 246 246
39020 -253 253 253 253 253 253 253 253 253 253 253 253
39021 -253 253 253 253 253 253 253 253 253 253 253 253
39022 -253 253 253 253 253 253 253 253 253 253 253 253
39023 -253 253 253 253 253 253 253 253 253 253 253 253
39024 -253 253 253 253 253 253 206 206 206 2 2 6
39025 - 22 22 22 34 34 34 18 14 6 22 22 22
39026 - 26 26 26 18 18 18 6 6 6 2 2 6
39027 - 2 2 6 82 82 82 54 54 54 18 18 18
39028 - 6 6 6 0 0 0 0 0 0 0 0 0
39029 - 0 0 0 0 0 0 0 0 0 0 0 0
39030 - 0 0 0 0 0 0 0 0 0 0 0 0
39031 - 0 0 0 0 0 0 0 0 0 0 0 0
39032 - 0 0 0 0 0 0 0 0 0 0 0 0
39033 - 0 0 0 0 0 0 0 0 0 0 0 0
39034 - 0 0 0 0 0 0 6 6 6 26 26 26
39035 - 62 62 62 106 106 106 74 54 14 185 133 11
39036 -210 162 10 121 92 8 6 6 6 62 62 62
39037 -238 238 238 253 253 253 253 253 253 253 253 253
39038 -253 253 253 253 253 253 253 253 253 253 253 253
39039 -253 253 253 253 253 253 231 231 231 246 246 246
39040 -253 253 253 253 253 253 253 253 253 253 253 253
39041 -253 253 253 253 253 253 253 253 253 253 253 253
39042 -253 253 253 253 253 253 253 253 253 253 253 253
39043 -253 253 253 253 253 253 253 253 253 253 253 253
39044 -253 253 253 253 253 253 158 158 158 18 18 18
39045 - 14 14 14 2 2 6 2 2 6 2 2 6
39046 - 6 6 6 18 18 18 66 66 66 38 38 38
39047 - 6 6 6 94 94 94 50 50 50 18 18 18
39048 - 6 6 6 0 0 0 0 0 0 0 0 0
39049 - 0 0 0 0 0 0 0 0 0 0 0 0
39050 - 0 0 0 0 0 0 0 0 0 0 0 0
39051 - 0 0 0 0 0 0 0 0 0 0 0 0
39052 - 0 0 0 0 0 0 0 0 0 0 0 0
39053 - 0 0 0 0 0 0 0 0 0 6 6 6
39054 - 10 10 10 10 10 10 18 18 18 38 38 38
39055 - 78 78 78 142 134 106 216 158 10 242 186 14
39056 -246 190 14 246 190 14 156 118 10 10 10 10
39057 - 90 90 90 238 238 238 253 253 253 253 253 253
39058 -253 253 253 253 253 253 253 253 253 253 253 253
39059 -253 253 253 253 253 253 231 231 231 250 250 250
39060 -253 253 253 253 253 253 253 253 253 253 253 253
39061 -253 253 253 253 253 253 253 253 253 253 253 253
39062 -253 253 253 253 253 253 253 253 253 253 253 253
39063 -253 253 253 253 253 253 253 253 253 246 230 190
39064 -238 204 91 238 204 91 181 142 44 37 26 9
39065 - 2 2 6 2 2 6 2 2 6 2 2 6
39066 - 2 2 6 2 2 6 38 38 38 46 46 46
39067 - 26 26 26 106 106 106 54 54 54 18 18 18
39068 - 6 6 6 0 0 0 0 0 0 0 0 0
39069 - 0 0 0 0 0 0 0 0 0 0 0 0
39070 - 0 0 0 0 0 0 0 0 0 0 0 0
39071 - 0 0 0 0 0 0 0 0 0 0 0 0
39072 - 0 0 0 0 0 0 0 0 0 0 0 0
39073 - 0 0 0 6 6 6 14 14 14 22 22 22
39074 - 30 30 30 38 38 38 50 50 50 70 70 70
39075 -106 106 106 190 142 34 226 170 11 242 186 14
39076 -246 190 14 246 190 14 246 190 14 154 114 10
39077 - 6 6 6 74 74 74 226 226 226 253 253 253
39078 -253 253 253 253 253 253 253 253 253 253 253 253
39079 -253 253 253 253 253 253 231 231 231 250 250 250
39080 -253 253 253 253 253 253 253 253 253 253 253 253
39081 -253 253 253 253 253 253 253 253 253 253 253 253
39082 -253 253 253 253 253 253 253 253 253 253 253 253
39083 -253 253 253 253 253 253 253 253 253 228 184 62
39084 -241 196 14 241 208 19 232 195 16 38 30 10
39085 - 2 2 6 2 2 6 2 2 6 2 2 6
39086 - 2 2 6 6 6 6 30 30 30 26 26 26
39087 -203 166 17 154 142 90 66 66 66 26 26 26
39088 - 6 6 6 0 0 0 0 0 0 0 0 0
39089 - 0 0 0 0 0 0 0 0 0 0 0 0
39090 - 0 0 0 0 0 0 0 0 0 0 0 0
39091 - 0 0 0 0 0 0 0 0 0 0 0 0
39092 - 0 0 0 0 0 0 0 0 0 0 0 0
39093 - 6 6 6 18 18 18 38 38 38 58 58 58
39094 - 78 78 78 86 86 86 101 101 101 123 123 123
39095 -175 146 61 210 150 10 234 174 13 246 186 14
39096 -246 190 14 246 190 14 246 190 14 238 190 10
39097 -102 78 10 2 2 6 46 46 46 198 198 198
39098 -253 253 253 253 253 253 253 253 253 253 253 253
39099 -253 253 253 253 253 253 234 234 234 242 242 242
39100 -253 253 253 253 253 253 253 253 253 253 253 253
39101 -253 253 253 253 253 253 253 253 253 253 253 253
39102 -253 253 253 253 253 253 253 253 253 253 253 253
39103 -253 253 253 253 253 253 253 253 253 224 178 62
39104 -242 186 14 241 196 14 210 166 10 22 18 6
39105 - 2 2 6 2 2 6 2 2 6 2 2 6
39106 - 2 2 6 2 2 6 6 6 6 121 92 8
39107 -238 202 15 232 195 16 82 82 82 34 34 34
39108 - 10 10 10 0 0 0 0 0 0 0 0 0
39109 - 0 0 0 0 0 0 0 0 0 0 0 0
39110 - 0 0 0 0 0 0 0 0 0 0 0 0
39111 - 0 0 0 0 0 0 0 0 0 0 0 0
39112 - 0 0 0 0 0 0 0 0 0 0 0 0
39113 - 14 14 14 38 38 38 70 70 70 154 122 46
39114 -190 142 34 200 144 11 197 138 11 197 138 11
39115 -213 154 11 226 170 11 242 186 14 246 190 14
39116 -246 190 14 246 190 14 246 190 14 246 190 14
39117 -225 175 15 46 32 6 2 2 6 22 22 22
39118 -158 158 158 250 250 250 253 253 253 253 253 253
39119 -253 253 253 253 253 253 253 253 253 253 253 253
39120 -253 253 253 253 253 253 253 253 253 253 253 253
39121 -253 253 253 253 253 253 253 253 253 253 253 253
39122 -253 253 253 253 253 253 253 253 253 253 253 253
39123 -253 253 253 250 250 250 242 242 242 224 178 62
39124 -239 182 13 236 186 11 213 154 11 46 32 6
39125 - 2 2 6 2 2 6 2 2 6 2 2 6
39126 - 2 2 6 2 2 6 61 42 6 225 175 15
39127 -238 190 10 236 186 11 112 100 78 42 42 42
39128 - 14 14 14 0 0 0 0 0 0 0 0 0
39129 - 0 0 0 0 0 0 0 0 0 0 0 0
39130 - 0 0 0 0 0 0 0 0 0 0 0 0
39131 - 0 0 0 0 0 0 0 0 0 0 0 0
39132 - 0 0 0 0 0 0 0 0 0 6 6 6
39133 - 22 22 22 54 54 54 154 122 46 213 154 11
39134 -226 170 11 230 174 11 226 170 11 226 170 11
39135 -236 178 12 242 186 14 246 190 14 246 190 14
39136 -246 190 14 246 190 14 246 190 14 246 190 14
39137 -241 196 14 184 144 12 10 10 10 2 2 6
39138 - 6 6 6 116 116 116 242 242 242 253 253 253
39139 -253 253 253 253 253 253 253 253 253 253 253 253
39140 -253 253 253 253 253 253 253 253 253 253 253 253
39141 -253 253 253 253 253 253 253 253 253 253 253 253
39142 -253 253 253 253 253 253 253 253 253 253 253 253
39143 -253 253 253 231 231 231 198 198 198 214 170 54
39144 -236 178 12 236 178 12 210 150 10 137 92 6
39145 - 18 14 6 2 2 6 2 2 6 2 2 6
39146 - 6 6 6 70 47 6 200 144 11 236 178 12
39147 -239 182 13 239 182 13 124 112 88 58 58 58
39148 - 22 22 22 6 6 6 0 0 0 0 0 0
39149 - 0 0 0 0 0 0 0 0 0 0 0 0
39150 - 0 0 0 0 0 0 0 0 0 0 0 0
39151 - 0 0 0 0 0 0 0 0 0 0 0 0
39152 - 0 0 0 0 0 0 0 0 0 10 10 10
39153 - 30 30 30 70 70 70 180 133 36 226 170 11
39154 -239 182 13 242 186 14 242 186 14 246 186 14
39155 -246 190 14 246 190 14 246 190 14 246 190 14
39156 -246 190 14 246 190 14 246 190 14 246 190 14
39157 -246 190 14 232 195 16 98 70 6 2 2 6
39158 - 2 2 6 2 2 6 66 66 66 221 221 221
39159 -253 253 253 253 253 253 253 253 253 253 253 253
39160 -253 253 253 253 253 253 253 253 253 253 253 253
39161 -253 253 253 253 253 253 253 253 253 253 253 253
39162 -253 253 253 253 253 253 253 253 253 253 253 253
39163 -253 253 253 206 206 206 198 198 198 214 166 58
39164 -230 174 11 230 174 11 216 158 10 192 133 9
39165 -163 110 8 116 81 8 102 78 10 116 81 8
39166 -167 114 7 197 138 11 226 170 11 239 182 13
39167 -242 186 14 242 186 14 162 146 94 78 78 78
39168 - 34 34 34 14 14 14 6 6 6 0 0 0
39169 - 0 0 0 0 0 0 0 0 0 0 0 0
39170 - 0 0 0 0 0 0 0 0 0 0 0 0
39171 - 0 0 0 0 0 0 0 0 0 0 0 0
39172 - 0 0 0 0 0 0 0 0 0 6 6 6
39173 - 30 30 30 78 78 78 190 142 34 226 170 11
39174 -239 182 13 246 190 14 246 190 14 246 190 14
39175 -246 190 14 246 190 14 246 190 14 246 190 14
39176 -246 190 14 246 190 14 246 190 14 246 190 14
39177 -246 190 14 241 196 14 203 166 17 22 18 6
39178 - 2 2 6 2 2 6 2 2 6 38 38 38
39179 -218 218 218 253 253 253 253 253 253 253 253 253
39180 -253 253 253 253 253 253 253 253 253 253 253 253
39181 -253 253 253 253 253 253 253 253 253 253 253 253
39182 -253 253 253 253 253 253 253 253 253 253 253 253
39183 -250 250 250 206 206 206 198 198 198 202 162 69
39184 -226 170 11 236 178 12 224 166 10 210 150 10
39185 -200 144 11 197 138 11 192 133 9 197 138 11
39186 -210 150 10 226 170 11 242 186 14 246 190 14
39187 -246 190 14 246 186 14 225 175 15 124 112 88
39188 - 62 62 62 30 30 30 14 14 14 6 6 6
39189 - 0 0 0 0 0 0 0 0 0 0 0 0
39190 - 0 0 0 0 0 0 0 0 0 0 0 0
39191 - 0 0 0 0 0 0 0 0 0 0 0 0
39192 - 0 0 0 0 0 0 0 0 0 10 10 10
39193 - 30 30 30 78 78 78 174 135 50 224 166 10
39194 -239 182 13 246 190 14 246 190 14 246 190 14
39195 -246 190 14 246 190 14 246 190 14 246 190 14
39196 -246 190 14 246 190 14 246 190 14 246 190 14
39197 -246 190 14 246 190 14 241 196 14 139 102 15
39198 - 2 2 6 2 2 6 2 2 6 2 2 6
39199 - 78 78 78 250 250 250 253 253 253 253 253 253
39200 -253 253 253 253 253 253 253 253 253 253 253 253
39201 -253 253 253 253 253 253 253 253 253 253 253 253
39202 -253 253 253 253 253 253 253 253 253 253 253 253
39203 -250 250 250 214 214 214 198 198 198 190 150 46
39204 -219 162 10 236 178 12 234 174 13 224 166 10
39205 -216 158 10 213 154 11 213 154 11 216 158 10
39206 -226 170 11 239 182 13 246 190 14 246 190 14
39207 -246 190 14 246 190 14 242 186 14 206 162 42
39208 -101 101 101 58 58 58 30 30 30 14 14 14
39209 - 6 6 6 0 0 0 0 0 0 0 0 0
39210 - 0 0 0 0 0 0 0 0 0 0 0 0
39211 - 0 0 0 0 0 0 0 0 0 0 0 0
39212 - 0 0 0 0 0 0 0 0 0 10 10 10
39213 - 30 30 30 74 74 74 174 135 50 216 158 10
39214 -236 178 12 246 190 14 246 190 14 246 190 14
39215 -246 190 14 246 190 14 246 190 14 246 190 14
39216 -246 190 14 246 190 14 246 190 14 246 190 14
39217 -246 190 14 246 190 14 241 196 14 226 184 13
39218 - 61 42 6 2 2 6 2 2 6 2 2 6
39219 - 22 22 22 238 238 238 253 253 253 253 253 253
39220 -253 253 253 253 253 253 253 253 253 253 253 253
39221 -253 253 253 253 253 253 253 253 253 253 253 253
39222 -253 253 253 253 253 253 253 253 253 253 253 253
39223 -253 253 253 226 226 226 187 187 187 180 133 36
39224 -216 158 10 236 178 12 239 182 13 236 178 12
39225 -230 174 11 226 170 11 226 170 11 230 174 11
39226 -236 178 12 242 186 14 246 190 14 246 190 14
39227 -246 190 14 246 190 14 246 186 14 239 182 13
39228 -206 162 42 106 106 106 66 66 66 34 34 34
39229 - 14 14 14 6 6 6 0 0 0 0 0 0
39230 - 0 0 0 0 0 0 0 0 0 0 0 0
39231 - 0 0 0 0 0 0 0 0 0 0 0 0
39232 - 0 0 0 0 0 0 0 0 0 6 6 6
39233 - 26 26 26 70 70 70 163 133 67 213 154 11
39234 -236 178 12 246 190 14 246 190 14 246 190 14
39235 -246 190 14 246 190 14 246 190 14 246 190 14
39236 -246 190 14 246 190 14 246 190 14 246 190 14
39237 -246 190 14 246 190 14 246 190 14 241 196 14
39238 -190 146 13 18 14 6 2 2 6 2 2 6
39239 - 46 46 46 246 246 246 253 253 253 253 253 253
39240 -253 253 253 253 253 253 253 253 253 253 253 253
39241 -253 253 253 253 253 253 253 253 253 253 253 253
39242 -253 253 253 253 253 253 253 253 253 253 253 253
39243 -253 253 253 221 221 221 86 86 86 156 107 11
39244 -216 158 10 236 178 12 242 186 14 246 186 14
39245 -242 186 14 239 182 13 239 182 13 242 186 14
39246 -242 186 14 246 186 14 246 190 14 246 190 14
39247 -246 190 14 246 190 14 246 190 14 246 190 14
39248 -242 186 14 225 175 15 142 122 72 66 66 66
39249 - 30 30 30 10 10 10 0 0 0 0 0 0
39250 - 0 0 0 0 0 0 0 0 0 0 0 0
39251 - 0 0 0 0 0 0 0 0 0 0 0 0
39252 - 0 0 0 0 0 0 0 0 0 6 6 6
39253 - 26 26 26 70 70 70 163 133 67 210 150 10
39254 -236 178 12 246 190 14 246 190 14 246 190 14
39255 -246 190 14 246 190 14 246 190 14 246 190 14
39256 -246 190 14 246 190 14 246 190 14 246 190 14
39257 -246 190 14 246 190 14 246 190 14 246 190 14
39258 -232 195 16 121 92 8 34 34 34 106 106 106
39259 -221 221 221 253 253 253 253 253 253 253 253 253
39260 -253 253 253 253 253 253 253 253 253 253 253 253
39261 -253 253 253 253 253 253 253 253 253 253 253 253
39262 -253 253 253 253 253 253 253 253 253 253 253 253
39263 -242 242 242 82 82 82 18 14 6 163 110 8
39264 -216 158 10 236 178 12 242 186 14 246 190 14
39265 -246 190 14 246 190 14 246 190 14 246 190 14
39266 -246 190 14 246 190 14 246 190 14 246 190 14
39267 -246 190 14 246 190 14 246 190 14 246 190 14
39268 -246 190 14 246 190 14 242 186 14 163 133 67
39269 - 46 46 46 18 18 18 6 6 6 0 0 0
39270 - 0 0 0 0 0 0 0 0 0 0 0 0
39271 - 0 0 0 0 0 0 0 0 0 0 0 0
39272 - 0 0 0 0 0 0 0 0 0 10 10 10
39273 - 30 30 30 78 78 78 163 133 67 210 150 10
39274 -236 178 12 246 186 14 246 190 14 246 190 14
39275 -246 190 14 246 190 14 246 190 14 246 190 14
39276 -246 190 14 246 190 14 246 190 14 246 190 14
39277 -246 190 14 246 190 14 246 190 14 246 190 14
39278 -241 196 14 215 174 15 190 178 144 253 253 253
39279 -253 253 253 253 253 253 253 253 253 253 253 253
39280 -253 253 253 253 253 253 253 253 253 253 253 253
39281 -253 253 253 253 253 253 253 253 253 253 253 253
39282 -253 253 253 253 253 253 253 253 253 218 218 218
39283 - 58 58 58 2 2 6 22 18 6 167 114 7
39284 -216 158 10 236 178 12 246 186 14 246 190 14
39285 -246 190 14 246 190 14 246 190 14 246 190 14
39286 -246 190 14 246 190 14 246 190 14 246 190 14
39287 -246 190 14 246 190 14 246 190 14 246 190 14
39288 -246 190 14 246 186 14 242 186 14 190 150 46
39289 - 54 54 54 22 22 22 6 6 6 0 0 0
39290 - 0 0 0 0 0 0 0 0 0 0 0 0
39291 - 0 0 0 0 0 0 0 0 0 0 0 0
39292 - 0 0 0 0 0 0 0 0 0 14 14 14
39293 - 38 38 38 86 86 86 180 133 36 213 154 11
39294 -236 178 12 246 186 14 246 190 14 246 190 14
39295 -246 190 14 246 190 14 246 190 14 246 190 14
39296 -246 190 14 246 190 14 246 190 14 246 190 14
39297 -246 190 14 246 190 14 246 190 14 246 190 14
39298 -246 190 14 232 195 16 190 146 13 214 214 214
39299 -253 253 253 253 253 253 253 253 253 253 253 253
39300 -253 253 253 253 253 253 253 253 253 253 253 253
39301 -253 253 253 253 253 253 253 253 253 253 253 253
39302 -253 253 253 250 250 250 170 170 170 26 26 26
39303 - 2 2 6 2 2 6 37 26 9 163 110 8
39304 -219 162 10 239 182 13 246 186 14 246 190 14
39305 -246 190 14 246 190 14 246 190 14 246 190 14
39306 -246 190 14 246 190 14 246 190 14 246 190 14
39307 -246 190 14 246 190 14 246 190 14 246 190 14
39308 -246 186 14 236 178 12 224 166 10 142 122 72
39309 - 46 46 46 18 18 18 6 6 6 0 0 0
39310 - 0 0 0 0 0 0 0 0 0 0 0 0
39311 - 0 0 0 0 0 0 0 0 0 0 0 0
39312 - 0 0 0 0 0 0 6 6 6 18 18 18
39313 - 50 50 50 109 106 95 192 133 9 224 166 10
39314 -242 186 14 246 190 14 246 190 14 246 190 14
39315 -246 190 14 246 190 14 246 190 14 246 190 14
39316 -246 190 14 246 190 14 246 190 14 246 190 14
39317 -246 190 14 246 190 14 246 190 14 246 190 14
39318 -242 186 14 226 184 13 210 162 10 142 110 46
39319 -226 226 226 253 253 253 253 253 253 253 253 253
39320 -253 253 253 253 253 253 253 253 253 253 253 253
39321 -253 253 253 253 253 253 253 253 253 253 253 253
39322 -198 198 198 66 66 66 2 2 6 2 2 6
39323 - 2 2 6 2 2 6 50 34 6 156 107 11
39324 -219 162 10 239 182 13 246 186 14 246 190 14
39325 -246 190 14 246 190 14 246 190 14 246 190 14
39326 -246 190 14 246 190 14 246 190 14 246 190 14
39327 -246 190 14 246 190 14 246 190 14 242 186 14
39328 -234 174 13 213 154 11 154 122 46 66 66 66
39329 - 30 30 30 10 10 10 0 0 0 0 0 0
39330 - 0 0 0 0 0 0 0 0 0 0 0 0
39331 - 0 0 0 0 0 0 0 0 0 0 0 0
39332 - 0 0 0 0 0 0 6 6 6 22 22 22
39333 - 58 58 58 154 121 60 206 145 10 234 174 13
39334 -242 186 14 246 186 14 246 190 14 246 190 14
39335 -246 190 14 246 190 14 246 190 14 246 190 14
39336 -246 190 14 246 190 14 246 190 14 246 190 14
39337 -246 190 14 246 190 14 246 190 14 246 190 14
39338 -246 186 14 236 178 12 210 162 10 163 110 8
39339 - 61 42 6 138 138 138 218 218 218 250 250 250
39340 -253 253 253 253 253 253 253 253 253 250 250 250
39341 -242 242 242 210 210 210 144 144 144 66 66 66
39342 - 6 6 6 2 2 6 2 2 6 2 2 6
39343 - 2 2 6 2 2 6 61 42 6 163 110 8
39344 -216 158 10 236 178 12 246 190 14 246 190 14
39345 -246 190 14 246 190 14 246 190 14 246 190 14
39346 -246 190 14 246 190 14 246 190 14 246 190 14
39347 -246 190 14 239 182 13 230 174 11 216 158 10
39348 -190 142 34 124 112 88 70 70 70 38 38 38
39349 - 18 18 18 6 6 6 0 0 0 0 0 0
39350 - 0 0 0 0 0 0 0 0 0 0 0 0
39351 - 0 0 0 0 0 0 0 0 0 0 0 0
39352 - 0 0 0 0 0 0 6 6 6 22 22 22
39353 - 62 62 62 168 124 44 206 145 10 224 166 10
39354 -236 178 12 239 182 13 242 186 14 242 186 14
39355 -246 186 14 246 190 14 246 190 14 246 190 14
39356 -246 190 14 246 190 14 246 190 14 246 190 14
39357 -246 190 14 246 190 14 246 190 14 246 190 14
39358 -246 190 14 236 178 12 216 158 10 175 118 6
39359 - 80 54 7 2 2 6 6 6 6 30 30 30
39360 - 54 54 54 62 62 62 50 50 50 38 38 38
39361 - 14 14 14 2 2 6 2 2 6 2 2 6
39362 - 2 2 6 2 2 6 2 2 6 2 2 6
39363 - 2 2 6 6 6 6 80 54 7 167 114 7
39364 -213 154 11 236 178 12 246 190 14 246 190 14
39365 -246 190 14 246 190 14 246 190 14 246 190 14
39366 -246 190 14 242 186 14 239 182 13 239 182 13
39367 -230 174 11 210 150 10 174 135 50 124 112 88
39368 - 82 82 82 54 54 54 34 34 34 18 18 18
39369 - 6 6 6 0 0 0 0 0 0 0 0 0
39370 - 0 0 0 0 0 0 0 0 0 0 0 0
39371 - 0 0 0 0 0 0 0 0 0 0 0 0
39372 - 0 0 0 0 0 0 6 6 6 18 18 18
39373 - 50 50 50 158 118 36 192 133 9 200 144 11
39374 -216 158 10 219 162 10 224 166 10 226 170 11
39375 -230 174 11 236 178 12 239 182 13 239 182 13
39376 -242 186 14 246 186 14 246 190 14 246 190 14
39377 -246 190 14 246 190 14 246 190 14 246 190 14
39378 -246 186 14 230 174 11 210 150 10 163 110 8
39379 -104 69 6 10 10 10 2 2 6 2 2 6
39380 - 2 2 6 2 2 6 2 2 6 2 2 6
39381 - 2 2 6 2 2 6 2 2 6 2 2 6
39382 - 2 2 6 2 2 6 2 2 6 2 2 6
39383 - 2 2 6 6 6 6 91 60 6 167 114 7
39384 -206 145 10 230 174 11 242 186 14 246 190 14
39385 -246 190 14 246 190 14 246 186 14 242 186 14
39386 -239 182 13 230 174 11 224 166 10 213 154 11
39387 -180 133 36 124 112 88 86 86 86 58 58 58
39388 - 38 38 38 22 22 22 10 10 10 6 6 6
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 0 0 0 0 0 0 0 0 0
39391 - 0 0 0 0 0 0 0 0 0 0 0 0
39392 - 0 0 0 0 0 0 0 0 0 14 14 14
39393 - 34 34 34 70 70 70 138 110 50 158 118 36
39394 -167 114 7 180 123 7 192 133 9 197 138 11
39395 -200 144 11 206 145 10 213 154 11 219 162 10
39396 -224 166 10 230 174 11 239 182 13 242 186 14
39397 -246 186 14 246 186 14 246 186 14 246 186 14
39398 -239 182 13 216 158 10 185 133 11 152 99 6
39399 -104 69 6 18 14 6 2 2 6 2 2 6
39400 - 2 2 6 2 2 6 2 2 6 2 2 6
39401 - 2 2 6 2 2 6 2 2 6 2 2 6
39402 - 2 2 6 2 2 6 2 2 6 2 2 6
39403 - 2 2 6 6 6 6 80 54 7 152 99 6
39404 -192 133 9 219 162 10 236 178 12 239 182 13
39405 -246 186 14 242 186 14 239 182 13 236 178 12
39406 -224 166 10 206 145 10 192 133 9 154 121 60
39407 - 94 94 94 62 62 62 42 42 42 22 22 22
39408 - 14 14 14 6 6 6 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 0 0 0 0 0 0 0 0 0 0 0 0
39412 - 0 0 0 0 0 0 0 0 0 6 6 6
39413 - 18 18 18 34 34 34 58 58 58 78 78 78
39414 -101 98 89 124 112 88 142 110 46 156 107 11
39415 -163 110 8 167 114 7 175 118 6 180 123 7
39416 -185 133 11 197 138 11 210 150 10 219 162 10
39417 -226 170 11 236 178 12 236 178 12 234 174 13
39418 -219 162 10 197 138 11 163 110 8 130 83 6
39419 - 91 60 6 10 10 10 2 2 6 2 2 6
39420 - 18 18 18 38 38 38 38 38 38 38 38 38
39421 - 38 38 38 38 38 38 38 38 38 38 38 38
39422 - 38 38 38 38 38 38 26 26 26 2 2 6
39423 - 2 2 6 6 6 6 70 47 6 137 92 6
39424 -175 118 6 200 144 11 219 162 10 230 174 11
39425 -234 174 13 230 174 11 219 162 10 210 150 10
39426 -192 133 9 163 110 8 124 112 88 82 82 82
39427 - 50 50 50 30 30 30 14 14 14 6 6 6
39428 - 0 0 0 0 0 0 0 0 0 0 0 0
39429 - 0 0 0 0 0 0 0 0 0 0 0 0
39430 - 0 0 0 0 0 0 0 0 0 0 0 0
39431 - 0 0 0 0 0 0 0 0 0 0 0 0
39432 - 0 0 0 0 0 0 0 0 0 0 0 0
39433 - 6 6 6 14 14 14 22 22 22 34 34 34
39434 - 42 42 42 58 58 58 74 74 74 86 86 86
39435 -101 98 89 122 102 70 130 98 46 121 87 25
39436 -137 92 6 152 99 6 163 110 8 180 123 7
39437 -185 133 11 197 138 11 206 145 10 200 144 11
39438 -180 123 7 156 107 11 130 83 6 104 69 6
39439 - 50 34 6 54 54 54 110 110 110 101 98 89
39440 - 86 86 86 82 82 82 78 78 78 78 78 78
39441 - 78 78 78 78 78 78 78 78 78 78 78 78
39442 - 78 78 78 82 82 82 86 86 86 94 94 94
39443 -106 106 106 101 101 101 86 66 34 124 80 6
39444 -156 107 11 180 123 7 192 133 9 200 144 11
39445 -206 145 10 200 144 11 192 133 9 175 118 6
39446 -139 102 15 109 106 95 70 70 70 42 42 42
39447 - 22 22 22 10 10 10 0 0 0 0 0 0
39448 - 0 0 0 0 0 0 0 0 0 0 0 0
39449 - 0 0 0 0 0 0 0 0 0 0 0 0
39450 - 0 0 0 0 0 0 0 0 0 0 0 0
39451 - 0 0 0 0 0 0 0 0 0 0 0 0
39452 - 0 0 0 0 0 0 0 0 0 0 0 0
39453 - 0 0 0 0 0 0 6 6 6 10 10 10
39454 - 14 14 14 22 22 22 30 30 30 38 38 38
39455 - 50 50 50 62 62 62 74 74 74 90 90 90
39456 -101 98 89 112 100 78 121 87 25 124 80 6
39457 -137 92 6 152 99 6 152 99 6 152 99 6
39458 -138 86 6 124 80 6 98 70 6 86 66 30
39459 -101 98 89 82 82 82 58 58 58 46 46 46
39460 - 38 38 38 34 34 34 34 34 34 34 34 34
39461 - 34 34 34 34 34 34 34 34 34 34 34 34
39462 - 34 34 34 34 34 34 38 38 38 42 42 42
39463 - 54 54 54 82 82 82 94 86 76 91 60 6
39464 -134 86 6 156 107 11 167 114 7 175 118 6
39465 -175 118 6 167 114 7 152 99 6 121 87 25
39466 -101 98 89 62 62 62 34 34 34 18 18 18
39467 - 6 6 6 0 0 0 0 0 0 0 0 0
39468 - 0 0 0 0 0 0 0 0 0 0 0 0
39469 - 0 0 0 0 0 0 0 0 0 0 0 0
39470 - 0 0 0 0 0 0 0 0 0 0 0 0
39471 - 0 0 0 0 0 0 0 0 0 0 0 0
39472 - 0 0 0 0 0 0 0 0 0 0 0 0
39473 - 0 0 0 0 0 0 0 0 0 0 0 0
39474 - 0 0 0 6 6 6 6 6 6 10 10 10
39475 - 18 18 18 22 22 22 30 30 30 42 42 42
39476 - 50 50 50 66 66 66 86 86 86 101 98 89
39477 -106 86 58 98 70 6 104 69 6 104 69 6
39478 -104 69 6 91 60 6 82 62 34 90 90 90
39479 - 62 62 62 38 38 38 22 22 22 14 14 14
39480 - 10 10 10 10 10 10 10 10 10 10 10 10
39481 - 10 10 10 10 10 10 6 6 6 10 10 10
39482 - 10 10 10 10 10 10 10 10 10 14 14 14
39483 - 22 22 22 42 42 42 70 70 70 89 81 66
39484 - 80 54 7 104 69 6 124 80 6 137 92 6
39485 -134 86 6 116 81 8 100 82 52 86 86 86
39486 - 58 58 58 30 30 30 14 14 14 6 6 6
39487 - 0 0 0 0 0 0 0 0 0 0 0 0
39488 - 0 0 0 0 0 0 0 0 0 0 0 0
39489 - 0 0 0 0 0 0 0 0 0 0 0 0
39490 - 0 0 0 0 0 0 0 0 0 0 0 0
39491 - 0 0 0 0 0 0 0 0 0 0 0 0
39492 - 0 0 0 0 0 0 0 0 0 0 0 0
39493 - 0 0 0 0 0 0 0 0 0 0 0 0
39494 - 0 0 0 0 0 0 0 0 0 0 0 0
39495 - 0 0 0 6 6 6 10 10 10 14 14 14
39496 - 18 18 18 26 26 26 38 38 38 54 54 54
39497 - 70 70 70 86 86 86 94 86 76 89 81 66
39498 - 89 81 66 86 86 86 74 74 74 50 50 50
39499 - 30 30 30 14 14 14 6 6 6 0 0 0
39500 - 0 0 0 0 0 0 0 0 0 0 0 0
39501 - 0 0 0 0 0 0 0 0 0 0 0 0
39502 - 0 0 0 0 0 0 0 0 0 0 0 0
39503 - 6 6 6 18 18 18 34 34 34 58 58 58
39504 - 82 82 82 89 81 66 89 81 66 89 81 66
39505 - 94 86 66 94 86 76 74 74 74 50 50 50
39506 - 26 26 26 14 14 14 6 6 6 0 0 0
39507 - 0 0 0 0 0 0 0 0 0 0 0 0
39508 - 0 0 0 0 0 0 0 0 0 0 0 0
39509 - 0 0 0 0 0 0 0 0 0 0 0 0
39510 - 0 0 0 0 0 0 0 0 0 0 0 0
39511 - 0 0 0 0 0 0 0 0 0 0 0 0
39512 - 0 0 0 0 0 0 0 0 0 0 0 0
39513 - 0 0 0 0 0 0 0 0 0 0 0 0
39514 - 0 0 0 0 0 0 0 0 0 0 0 0
39515 - 0 0 0 0 0 0 0 0 0 0 0 0
39516 - 6 6 6 6 6 6 14 14 14 18 18 18
39517 - 30 30 30 38 38 38 46 46 46 54 54 54
39518 - 50 50 50 42 42 42 30 30 30 18 18 18
39519 - 10 10 10 0 0 0 0 0 0 0 0 0
39520 - 0 0 0 0 0 0 0 0 0 0 0 0
39521 - 0 0 0 0 0 0 0 0 0 0 0 0
39522 - 0 0 0 0 0 0 0 0 0 0 0 0
39523 - 0 0 0 6 6 6 14 14 14 26 26 26
39524 - 38 38 38 50 50 50 58 58 58 58 58 58
39525 - 54 54 54 42 42 42 30 30 30 18 18 18
39526 - 10 10 10 0 0 0 0 0 0 0 0 0
39527 - 0 0 0 0 0 0 0 0 0 0 0 0
39528 - 0 0 0 0 0 0 0 0 0 0 0 0
39529 - 0 0 0 0 0 0 0 0 0 0 0 0
39530 - 0 0 0 0 0 0 0 0 0 0 0 0
39531 - 0 0 0 0 0 0 0 0 0 0 0 0
39532 - 0 0 0 0 0 0 0 0 0 0 0 0
39533 - 0 0 0 0 0 0 0 0 0 0 0 0
39534 - 0 0 0 0 0 0 0 0 0 0 0 0
39535 - 0 0 0 0 0 0 0 0 0 0 0 0
39536 - 0 0 0 0 0 0 0 0 0 6 6 6
39537 - 6 6 6 10 10 10 14 14 14 18 18 18
39538 - 18 18 18 14 14 14 10 10 10 6 6 6
39539 - 0 0 0 0 0 0 0 0 0 0 0 0
39540 - 0 0 0 0 0 0 0 0 0 0 0 0
39541 - 0 0 0 0 0 0 0 0 0 0 0 0
39542 - 0 0 0 0 0 0 0 0 0 0 0 0
39543 - 0 0 0 0 0 0 0 0 0 6 6 6
39544 - 14 14 14 18 18 18 22 22 22 22 22 22
39545 - 18 18 18 14 14 14 10 10 10 6 6 6
39546 - 0 0 0 0 0 0 0 0 0 0 0 0
39547 - 0 0 0 0 0 0 0 0 0 0 0 0
39548 - 0 0 0 0 0 0 0 0 0 0 0 0
39549 - 0 0 0 0 0 0 0 0 0 0 0 0
39550 - 0 0 0 0 0 0 0 0 0 0 0 0
39551 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39562 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39563 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39564 +4 4 4 4 4 4
39565 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39577 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39578 +4 4 4 4 4 4
39579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39591 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39592 +4 4 4 4 4 4
39593 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39605 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39606 +4 4 4 4 4 4
39607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39610 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39620 +4 4 4 4 4 4
39621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39624 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39634 +4 4 4 4 4 4
39635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39639 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39640 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39644 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39645 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39646 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39648 +4 4 4 4 4 4
39649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39651 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39652 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39653 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39654 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39655 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39658 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39659 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39660 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39661 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39662 +4 4 4 4 4 4
39663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39666 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39667 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39668 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39669 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39672 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39673 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39674 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39675 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39676 +4 4 4 4 4 4
39677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39680 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39681 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39682 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39683 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39685 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39686 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39687 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39688 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39689 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
39690 +4 4 4 4 4 4
39691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39694 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
39695 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
39696 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
39697 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
39698 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39699 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
39700 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
39701 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
39702 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
39703 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
39704 +4 4 4 4 4 4
39705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39708 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
39709 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
39710 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
39711 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
39712 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39713 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
39714 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
39715 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
39716 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
39717 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
39718 +4 4 4 4 4 4
39719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39721 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
39722 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
39723 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
39724 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
39725 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
39726 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
39727 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
39728 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
39729 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
39730 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
39731 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
39732 +4 4 4 4 4 4
39733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39735 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
39736 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
39737 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
39738 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
39739 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
39740 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
39741 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
39742 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
39743 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
39744 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
39745 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
39746 +4 4 4 4 4 4
39747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39749 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
39750 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
39751 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
39752 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
39753 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
39754 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
39755 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
39756 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
39757 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
39758 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
39759 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39760 +4 4 4 4 4 4
39761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39763 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
39764 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
39765 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
39766 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
39767 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
39768 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
39769 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
39770 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
39771 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
39772 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
39773 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
39774 +4 4 4 4 4 4
39775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39776 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
39777 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
39778 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
39779 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
39780 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
39781 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
39782 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
39783 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
39784 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
39785 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
39786 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
39787 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
39788 +4 4 4 4 4 4
39789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39790 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
39791 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
39792 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
39793 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39794 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
39795 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
39796 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
39797 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
39798 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
39799 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
39800 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
39801 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
39802 +0 0 0 4 4 4
39803 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39804 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
39805 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
39806 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
39807 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
39808 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
39809 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
39810 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
39811 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
39812 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
39813 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
39814 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
39815 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
39816 +2 0 0 0 0 0
39817 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
39818 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
39819 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
39820 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
39821 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
39822 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
39823 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
39824 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
39825 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
39826 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
39827 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
39828 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
39829 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
39830 +37 38 37 0 0 0
39831 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39832 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
39833 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
39834 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
39835 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
39836 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
39837 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
39838 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
39839 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
39840 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
39841 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
39842 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
39843 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
39844 +85 115 134 4 0 0
39845 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
39846 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
39847 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
39848 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
39849 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
39850 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
39851 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
39852 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
39853 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
39854 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
39855 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
39856 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
39857 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
39858 +60 73 81 4 0 0
39859 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
39860 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
39861 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
39862 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
39863 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
39864 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
39865 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
39866 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
39867 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
39868 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
39869 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
39870 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
39871 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
39872 +16 19 21 4 0 0
39873 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
39874 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
39875 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
39876 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
39877 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
39878 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
39879 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
39880 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
39881 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
39882 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
39883 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
39884 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
39885 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
39886 +4 0 0 4 3 3
39887 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
39888 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
39889 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
39890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
39891 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
39892 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
39893 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
39894 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
39895 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
39896 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
39897 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
39898 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
39899 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
39900 +3 2 2 4 4 4
39901 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
39902 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
39903 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
39904 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39905 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
39906 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
39907 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
39908 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
39909 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
39910 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
39911 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
39912 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
39913 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
39914 +4 4 4 4 4 4
39915 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
39916 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
39917 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
39918 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
39919 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
39920 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
39921 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
39922 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
39923 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
39924 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
39925 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
39926 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
39927 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
39928 +4 4 4 4 4 4
39929 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
39930 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
39931 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
39932 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
39933 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
39934 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
39935 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
39936 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
39937 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
39938 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
39939 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
39940 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
39941 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
39942 +5 5 5 5 5 5
39943 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
39944 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
39945 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
39946 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
39947 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
39948 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39949 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
39950 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
39951 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
39952 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
39953 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
39954 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
39955 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39956 +5 5 5 4 4 4
39957 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
39958 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
39959 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
39960 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
39961 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39962 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
39963 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
39964 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
39965 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
39966 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
39967 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
39968 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
39969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39970 +4 4 4 4 4 4
39971 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
39972 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
39973 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
39974 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
39975 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
39976 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39977 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
39978 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
39979 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
39980 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
39981 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
39982 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
39983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39984 +4 4 4 4 4 4
39985 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
39986 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
39987 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
39988 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
39989 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
39990 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
39991 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
39992 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
39993 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
39994 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
39995 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
39996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39998 +4 4 4 4 4 4
39999 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40000 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40001 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40002 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40003 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40004 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40005 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40006 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40007 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40008 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40009 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40012 +4 4 4 4 4 4
40013 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40014 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40015 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40016 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40017 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40018 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40019 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40020 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40021 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40022 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40023 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40026 +4 4 4 4 4 4
40027 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40028 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40029 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40030 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40031 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40032 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40033 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40034 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40035 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40036 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40037 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40040 +4 4 4 4 4 4
40041 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40042 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40043 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40044 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40045 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40046 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40047 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40048 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40049 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40050 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40051 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40054 +4 4 4 4 4 4
40055 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40056 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40057 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40058 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40059 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40060 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40061 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40062 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40063 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40064 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40065 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40068 +4 4 4 4 4 4
40069 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40070 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40071 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40072 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40073 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40074 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40075 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40076 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40077 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40078 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40079 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40082 +4 4 4 4 4 4
40083 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40084 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40085 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40086 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40087 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40088 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40089 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40090 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40091 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40092 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40093 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40096 +4 4 4 4 4 4
40097 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40098 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40099 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40100 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40101 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40102 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40103 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40104 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40105 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40106 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40107 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40110 +4 4 4 4 4 4
40111 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40112 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40113 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40114 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40115 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40116 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40117 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40118 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40119 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40120 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40121 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40124 +4 4 4 4 4 4
40125 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40126 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40127 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40128 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40129 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40130 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40131 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40132 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40133 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40134 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40135 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40138 +4 4 4 4 4 4
40139 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40140 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40141 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40142 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40143 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40144 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40145 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40146 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40147 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40148 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40149 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40152 +4 4 4 4 4 4
40153 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40154 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40155 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40156 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40157 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40158 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40159 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40160 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40161 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40162 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40163 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40166 +4 4 4 4 4 4
40167 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40168 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40169 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40170 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40171 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40172 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40173 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40174 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40175 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40176 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40177 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40180 +4 4 4 4 4 4
40181 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40182 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40183 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40184 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40185 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40186 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40187 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40188 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40189 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40190 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40191 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40194 +4 4 4 4 4 4
40195 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40196 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40197 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40198 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40199 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40200 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40201 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40202 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40203 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40204 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40205 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40208 +4 4 4 4 4 4
40209 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40210 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40211 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40212 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40213 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40214 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40215 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40216 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40217 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40218 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40219 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40222 +4 4 4 4 4 4
40223 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40224 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40225 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40226 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40227 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40228 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40229 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40230 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40231 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40232 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40233 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40236 +4 4 4 4 4 4
40237 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40238 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40239 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40240 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40241 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40242 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40243 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40244 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40245 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40246 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40247 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40250 +4 4 4 4 4 4
40251 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40252 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40253 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40254 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40255 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40256 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40257 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40258 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40259 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40260 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40261 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40264 +4 4 4 4 4 4
40265 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40266 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40267 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40268 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40269 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40270 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40271 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40272 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40273 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40274 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40275 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40278 +4 4 4 4 4 4
40279 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40280 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40281 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40282 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40283 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40284 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40285 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40286 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40287 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40288 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40289 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292 +4 4 4 4 4 4
40293 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40294 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40295 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40296 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40297 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40298 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40299 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40300 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40301 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40302 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40303 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306 +4 4 4 4 4 4
40307 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40308 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40309 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40310 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40311 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40312 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40313 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40314 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40315 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40316 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40317 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40320 +4 4 4 4 4 4
40321 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40322 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40323 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40324 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40325 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40326 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40327 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40328 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40329 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40330 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40331 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40334 +4 4 4 4 4 4
40335 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40336 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40337 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40338 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40339 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40340 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40341 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40342 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40343 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40344 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40345 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40348 +4 4 4 4 4 4
40349 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40350 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40351 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40352 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40353 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40354 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40355 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40356 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40357 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40358 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40359 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40362 +4 4 4 4 4 4
40363 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40364 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40365 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40366 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40367 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40368 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40369 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40370 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40371 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40372 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376 +4 4 4 4 4 4
40377 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40378 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40379 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40380 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40381 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40382 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40383 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40384 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40385 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40386 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40390 +4 4 4 4 4 4
40391 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40392 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40393 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40394 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40395 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40396 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40397 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40398 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40399 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40400 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40404 +4 4 4 4 4 4
40405 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40406 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40407 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40408 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40409 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40410 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40411 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40412 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40413 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40414 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40418 +4 4 4 4 4 4
40419 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40420 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40421 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40422 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40423 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40424 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40425 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40426 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40427 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40432 +4 4 4 4 4 4
40433 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40434 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40435 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40436 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40437 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40438 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40439 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40440 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40441 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40446 +4 4 4 4 4 4
40447 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40448 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40449 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40450 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40451 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40452 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40453 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40454 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40455 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40456 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40460 +4 4 4 4 4 4
40461 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40462 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40463 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40464 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40465 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40466 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40467 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40468 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40470 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40473 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40474 +4 4 4 4 4 4
40475 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40476 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40477 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40478 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40479 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40480 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40481 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40482 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40484 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40487 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40488 +4 4 4 4 4 4
40489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40490 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40491 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40492 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40493 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40494 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40495 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40496 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40498 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40499 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40500 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40501 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40502 +4 4 4 4 4 4
40503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40505 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40506 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40507 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40508 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40509 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40510 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40511 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40512 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40513 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40514 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40516 +4 4 4 4 4 4
40517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40518 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40519 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40520 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40521 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40522 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40523 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40524 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40525 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40526 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40527 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40528 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40530 +4 4 4 4 4 4
40531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40533 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40534 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40535 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40536 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40537 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40538 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40539 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40540 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40541 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40542 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40544 +4 4 4 4 4 4
40545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40548 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40549 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40550 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40551 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40552 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40553 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40554 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40555 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40558 +4 4 4 4 4 4
40559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40562 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40563 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40564 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40565 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40566 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40567 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40568 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40569 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40572 +4 4 4 4 4 4
40573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40576 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40577 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40578 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40579 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40580 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40581 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40582 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40583 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40586 +4 4 4 4 4 4
40587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40590 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40591 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40592 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40593 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40594 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40595 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40596 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40597 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40600 +4 4 4 4 4 4
40601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40605 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40606 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40607 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40608 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40609 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40610 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40611 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40614 +4 4 4 4 4 4
40615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40619 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40620 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40621 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40622 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40623 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40624 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40628 +4 4 4 4 4 4
40629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40633 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40634 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40635 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40636 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40637 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40642 +4 4 4 4 4 4
40643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40645 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40647 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40648 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40651 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40652 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40654 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40656 +4 4 4 4 4 4
40657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40659 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40661 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40662 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40666 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40668 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40670 +4 4 4 4 4 4
40671 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
40672 index 3473e75..c930142 100644
40673 --- a/drivers/video/udlfb.c
40674 +++ b/drivers/video/udlfb.c
40675 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
40676 dlfb_urb_completion(urb);
40677
40678 error:
40679 - atomic_add(bytes_sent, &dev->bytes_sent);
40680 - atomic_add(bytes_identical, &dev->bytes_identical);
40681 - atomic_add(width*height*2, &dev->bytes_rendered);
40682 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40683 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40684 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40685 end_cycles = get_cycles();
40686 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40687 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40688 >> 10)), /* Kcycles */
40689 &dev->cpu_kcycles_used);
40690
40691 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
40692 dlfb_urb_completion(urb);
40693
40694 error:
40695 - atomic_add(bytes_sent, &dev->bytes_sent);
40696 - atomic_add(bytes_identical, &dev->bytes_identical);
40697 - atomic_add(bytes_rendered, &dev->bytes_rendered);
40698 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40699 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40700 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
40701 end_cycles = get_cycles();
40702 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40703 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40704 >> 10)), /* Kcycles */
40705 &dev->cpu_kcycles_used);
40706 }
40707 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
40708 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40709 struct dlfb_data *dev = fb_info->par;
40710 return snprintf(buf, PAGE_SIZE, "%u\n",
40711 - atomic_read(&dev->bytes_rendered));
40712 + atomic_read_unchecked(&dev->bytes_rendered));
40713 }
40714
40715 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40716 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40717 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40718 struct dlfb_data *dev = fb_info->par;
40719 return snprintf(buf, PAGE_SIZE, "%u\n",
40720 - atomic_read(&dev->bytes_identical));
40721 + atomic_read_unchecked(&dev->bytes_identical));
40722 }
40723
40724 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40725 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40726 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40727 struct dlfb_data *dev = fb_info->par;
40728 return snprintf(buf, PAGE_SIZE, "%u\n",
40729 - atomic_read(&dev->bytes_sent));
40730 + atomic_read_unchecked(&dev->bytes_sent));
40731 }
40732
40733 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40734 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40735 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40736 struct dlfb_data *dev = fb_info->par;
40737 return snprintf(buf, PAGE_SIZE, "%u\n",
40738 - atomic_read(&dev->cpu_kcycles_used));
40739 + atomic_read_unchecked(&dev->cpu_kcycles_used));
40740 }
40741
40742 static ssize_t edid_show(
40743 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
40744 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40745 struct dlfb_data *dev = fb_info->par;
40746
40747 - atomic_set(&dev->bytes_rendered, 0);
40748 - atomic_set(&dev->bytes_identical, 0);
40749 - atomic_set(&dev->bytes_sent, 0);
40750 - atomic_set(&dev->cpu_kcycles_used, 0);
40751 + atomic_set_unchecked(&dev->bytes_rendered, 0);
40752 + atomic_set_unchecked(&dev->bytes_identical, 0);
40753 + atomic_set_unchecked(&dev->bytes_sent, 0);
40754 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
40755
40756 return count;
40757 }
40758 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
40759 index 7f8472c..9842e87 100644
40760 --- a/drivers/video/uvesafb.c
40761 +++ b/drivers/video/uvesafb.c
40762 @@ -19,6 +19,7 @@
40763 #include <linux/io.h>
40764 #include <linux/mutex.h>
40765 #include <linux/slab.h>
40766 +#include <linux/moduleloader.h>
40767 #include <video/edid.h>
40768 #include <video/uvesafb.h>
40769 #ifdef CONFIG_X86
40770 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
40771 NULL,
40772 };
40773
40774 - return call_usermodehelper(v86d_path, argv, envp, 1);
40775 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
40776 }
40777
40778 /*
40779 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
40780 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
40781 par->pmi_setpal = par->ypan = 0;
40782 } else {
40783 +
40784 +#ifdef CONFIG_PAX_KERNEXEC
40785 +#ifdef CONFIG_MODULES
40786 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
40787 +#endif
40788 + if (!par->pmi_code) {
40789 + par->pmi_setpal = par->ypan = 0;
40790 + return 0;
40791 + }
40792 +#endif
40793 +
40794 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
40795 + task->t.regs.edi);
40796 +
40797 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40798 + pax_open_kernel();
40799 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
40800 + pax_close_kernel();
40801 +
40802 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
40803 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
40804 +#else
40805 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
40806 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
40807 +#endif
40808 +
40809 printk(KERN_INFO "uvesafb: protected mode interface info at "
40810 "%04x:%04x\n",
40811 (u16)task->t.regs.es, (u16)task->t.regs.edi);
40812 @@ -1821,6 +1844,11 @@ out:
40813 if (par->vbe_modes)
40814 kfree(par->vbe_modes);
40815
40816 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40817 + if (par->pmi_code)
40818 + module_free_exec(NULL, par->pmi_code);
40819 +#endif
40820 +
40821 framebuffer_release(info);
40822 return err;
40823 }
40824 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
40825 kfree(par->vbe_state_orig);
40826 if (par->vbe_state_saved)
40827 kfree(par->vbe_state_saved);
40828 +
40829 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40830 + if (par->pmi_code)
40831 + module_free_exec(NULL, par->pmi_code);
40832 +#endif
40833 +
40834 }
40835
40836 framebuffer_release(info);
40837 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
40838 index 501b340..86bd4cf 100644
40839 --- a/drivers/video/vesafb.c
40840 +++ b/drivers/video/vesafb.c
40841 @@ -9,6 +9,7 @@
40842 */
40843
40844 #include <linux/module.h>
40845 +#include <linux/moduleloader.h>
40846 #include <linux/kernel.h>
40847 #include <linux/errno.h>
40848 #include <linux/string.h>
40849 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
40850 static int vram_total __initdata; /* Set total amount of memory */
40851 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
40852 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
40853 -static void (*pmi_start)(void) __read_mostly;
40854 -static void (*pmi_pal) (void) __read_mostly;
40855 +static void (*pmi_start)(void) __read_only;
40856 +static void (*pmi_pal) (void) __read_only;
40857 static int depth __read_mostly;
40858 static int vga_compat __read_mostly;
40859 /* --------------------------------------------------------------------- */
40860 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
40861 unsigned int size_vmode;
40862 unsigned int size_remap;
40863 unsigned int size_total;
40864 + void *pmi_code = NULL;
40865
40866 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
40867 return -ENODEV;
40868 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
40869 size_remap = size_total;
40870 vesafb_fix.smem_len = size_remap;
40871
40872 -#ifndef __i386__
40873 - screen_info.vesapm_seg = 0;
40874 -#endif
40875 -
40876 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
40877 printk(KERN_WARNING
40878 "vesafb: cannot reserve video memory at 0x%lx\n",
40879 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
40880 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
40881 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
40882
40883 +#ifdef __i386__
40884 +
40885 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40886 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
40887 + if (!pmi_code)
40888 +#elif !defined(CONFIG_PAX_KERNEXEC)
40889 + if (0)
40890 +#endif
40891 +
40892 +#endif
40893 + screen_info.vesapm_seg = 0;
40894 +
40895 if (screen_info.vesapm_seg) {
40896 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
40897 - screen_info.vesapm_seg,screen_info.vesapm_off);
40898 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
40899 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
40900 }
40901
40902 if (screen_info.vesapm_seg < 0xc000)
40903 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
40904
40905 if (ypan || pmi_setpal) {
40906 unsigned short *pmi_base;
40907 +
40908 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
40909 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
40910 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
40911 +
40912 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40913 + pax_open_kernel();
40914 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
40915 +#else
40916 + pmi_code = pmi_base;
40917 +#endif
40918 +
40919 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
40920 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
40921 +
40922 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40923 + pmi_start = ktva_ktla(pmi_start);
40924 + pmi_pal = ktva_ktla(pmi_pal);
40925 + pax_close_kernel();
40926 +#endif
40927 +
40928 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
40929 if (pmi_base[3]) {
40930 printk(KERN_INFO "vesafb: pmi: ports = ");
40931 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
40932 info->node, info->fix.id);
40933 return 0;
40934 err:
40935 +
40936 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40937 + module_free_exec(NULL, pmi_code);
40938 +#endif
40939 +
40940 if (info->screen_base)
40941 iounmap(info->screen_base);
40942 framebuffer_release(info);
40943 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
40944 index 88714ae..16c2e11 100644
40945 --- a/drivers/video/via/via_clock.h
40946 +++ b/drivers/video/via/via_clock.h
40947 @@ -56,7 +56,7 @@ struct via_clock {
40948
40949 void (*set_engine_pll_state)(u8 state);
40950 void (*set_engine_pll)(struct via_pll_config config);
40951 -};
40952 +} __no_const;
40953
40954
40955 static inline u32 get_pll_internal_frequency(u32 ref_freq,
40956 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
40957 index e56c934..fc22f4b 100644
40958 --- a/drivers/xen/xen-pciback/conf_space.h
40959 +++ b/drivers/xen/xen-pciback/conf_space.h
40960 @@ -44,15 +44,15 @@ struct config_field {
40961 struct {
40962 conf_dword_write write;
40963 conf_dword_read read;
40964 - } dw;
40965 + } __no_const dw;
40966 struct {
40967 conf_word_write write;
40968 conf_word_read read;
40969 - } w;
40970 + } __no_const w;
40971 struct {
40972 conf_byte_write write;
40973 conf_byte_read read;
40974 - } b;
40975 + } __no_const b;
40976 } u;
40977 struct list_head list;
40978 };
40979 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
40980 index 879ed88..bc03a01 100644
40981 --- a/fs/9p/vfs_inode.c
40982 +++ b/fs/9p/vfs_inode.c
40983 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
40984 void
40985 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
40986 {
40987 - char *s = nd_get_link(nd);
40988 + const char *s = nd_get_link(nd);
40989
40990 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
40991 IS_ERR(s) ? "<error>" : s);
40992 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
40993 index 79e2ca7..5828ad1 100644
40994 --- a/fs/Kconfig.binfmt
40995 +++ b/fs/Kconfig.binfmt
40996 @@ -86,7 +86,7 @@ config HAVE_AOUT
40997
40998 config BINFMT_AOUT
40999 tristate "Kernel support for a.out and ECOFF binaries"
41000 - depends on HAVE_AOUT
41001 + depends on HAVE_AOUT && BROKEN
41002 ---help---
41003 A.out (Assembler.OUTput) is a set of formats for libraries and
41004 executables used in the earliest versions of UNIX. Linux used
41005 diff --git a/fs/aio.c b/fs/aio.c
41006 index 67e4b90..fbb09dc 100644
41007 --- a/fs/aio.c
41008 +++ b/fs/aio.c
41009 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41010 size += sizeof(struct io_event) * nr_events;
41011 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41012
41013 - if (nr_pages < 0)
41014 + if (nr_pages <= 0)
41015 return -EINVAL;
41016
41017 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41018 @@ -1463,22 +1463,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41019 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41020 {
41021 ssize_t ret;
41022 + struct iovec iovstack;
41023
41024 #ifdef CONFIG_COMPAT
41025 if (compat)
41026 ret = compat_rw_copy_check_uvector(type,
41027 (struct compat_iovec __user *)kiocb->ki_buf,
41028 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41029 + kiocb->ki_nbytes, 1, &iovstack,
41030 &kiocb->ki_iovec, 1);
41031 else
41032 #endif
41033 ret = rw_copy_check_uvector(type,
41034 (struct iovec __user *)kiocb->ki_buf,
41035 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41036 + kiocb->ki_nbytes, 1, &iovstack,
41037 &kiocb->ki_iovec, 1);
41038 if (ret < 0)
41039 goto out;
41040
41041 + if (kiocb->ki_iovec == &iovstack) {
41042 + kiocb->ki_inline_vec = iovstack;
41043 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41044 + }
41045 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41046 kiocb->ki_cur_seg = 0;
41047 /* ki_nbytes/left now reflect bytes instead of segs */
41048 diff --git a/fs/attr.c b/fs/attr.c
41049 index 7ee7ba4..0c61a60 100644
41050 --- a/fs/attr.c
41051 +++ b/fs/attr.c
41052 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41053 unsigned long limit;
41054
41055 limit = rlimit(RLIMIT_FSIZE);
41056 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41057 if (limit != RLIM_INFINITY && offset > limit)
41058 goto out_sig;
41059 if (offset > inode->i_sb->s_maxbytes)
41060 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41061 index 6861f61..a25f010 100644
41062 --- a/fs/autofs4/waitq.c
41063 +++ b/fs/autofs4/waitq.c
41064 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
41065 {
41066 unsigned long sigpipe, flags;
41067 mm_segment_t fs;
41068 - const char *data = (const char *)addr;
41069 + const char __user *data = (const char __force_user *)addr;
41070 ssize_t wr = 0;
41071
41072 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
41073 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41074 index 8342ca6..82fd192 100644
41075 --- a/fs/befs/linuxvfs.c
41076 +++ b/fs/befs/linuxvfs.c
41077 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41078 {
41079 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41080 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41081 - char *link = nd_get_link(nd);
41082 + const char *link = nd_get_link(nd);
41083 if (!IS_ERR(link))
41084 kfree(link);
41085 }
41086 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41087 index a6395bd..f1e376a 100644
41088 --- a/fs/binfmt_aout.c
41089 +++ b/fs/binfmt_aout.c
41090 @@ -16,6 +16,7 @@
41091 #include <linux/string.h>
41092 #include <linux/fs.h>
41093 #include <linux/file.h>
41094 +#include <linux/security.h>
41095 #include <linux/stat.h>
41096 #include <linux/fcntl.h>
41097 #include <linux/ptrace.h>
41098 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41099 #endif
41100 # define START_STACK(u) ((void __user *)u.start_stack)
41101
41102 + memset(&dump, 0, sizeof(dump));
41103 +
41104 fs = get_fs();
41105 set_fs(KERNEL_DS);
41106 has_dumped = 1;
41107 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41108
41109 /* If the size of the dump file exceeds the rlimit, then see what would happen
41110 if we wrote the stack, but not the data area. */
41111 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41112 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41113 dump.u_dsize = 0;
41114
41115 /* Make sure we have enough room to write the stack and data areas. */
41116 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41117 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41118 dump.u_ssize = 0;
41119
41120 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41121 rlim = rlimit(RLIMIT_DATA);
41122 if (rlim >= RLIM_INFINITY)
41123 rlim = ~0;
41124 +
41125 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41126 if (ex.a_data + ex.a_bss > rlim)
41127 return -ENOMEM;
41128
41129 @@ -259,9 +266,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41130 current->mm->free_area_cache = current->mm->mmap_base;
41131 current->mm->cached_hole_size = 0;
41132
41133 + retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
41134 + if (retval < 0) {
41135 + /* Someone check-me: is this error path enough? */
41136 + send_sig(SIGKILL, current, 0);
41137 + return retval;
41138 + }
41139 +
41140 install_exec_creds(bprm);
41141 current->flags &= ~PF_FORKNOEXEC;
41142
41143 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41144 + current->mm->pax_flags = 0UL;
41145 +#endif
41146 +
41147 +#ifdef CONFIG_PAX_PAGEEXEC
41148 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41149 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41150 +
41151 +#ifdef CONFIG_PAX_EMUTRAMP
41152 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41153 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41154 +#endif
41155 +
41156 +#ifdef CONFIG_PAX_MPROTECT
41157 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41158 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41159 +#endif
41160 +
41161 + }
41162 +#endif
41163 +
41164 if (N_MAGIC(ex) == OMAGIC) {
41165 unsigned long text_addr, map_size;
41166 loff_t pos;
41167 @@ -334,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41168
41169 down_write(&current->mm->mmap_sem);
41170 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41171 - PROT_READ | PROT_WRITE | PROT_EXEC,
41172 + PROT_READ | PROT_WRITE,
41173 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41174 fd_offset + ex.a_text);
41175 up_write(&current->mm->mmap_sem);
41176 @@ -352,13 +387,6 @@ beyond_if:
41177 return retval;
41178 }
41179
41180 - retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
41181 - if (retval < 0) {
41182 - /* Someone check-me: is this error path enough? */
41183 - send_sig(SIGKILL, current, 0);
41184 - return retval;
41185 - }
41186 -
41187 current->mm->start_stack =
41188 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
41189 #ifdef __alpha__
41190 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41191 index 6ff96c6..dbf63ee 100644
41192 --- a/fs/binfmt_elf.c
41193 +++ b/fs/binfmt_elf.c
41194 @@ -32,6 +32,7 @@
41195 #include <linux/elf.h>
41196 #include <linux/utsname.h>
41197 #include <linux/coredump.h>
41198 +#include <linux/xattr.h>
41199 #include <asm/uaccess.h>
41200 #include <asm/param.h>
41201 #include <asm/page.h>
41202 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41203 #define elf_core_dump NULL
41204 #endif
41205
41206 +#ifdef CONFIG_PAX_MPROTECT
41207 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41208 +#endif
41209 +
41210 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41211 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41212 #else
41213 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
41214 .load_binary = load_elf_binary,
41215 .load_shlib = load_elf_library,
41216 .core_dump = elf_core_dump,
41217 +
41218 +#ifdef CONFIG_PAX_MPROTECT
41219 + .handle_mprotect= elf_handle_mprotect,
41220 +#endif
41221 +
41222 .min_coredump = ELF_EXEC_PAGESIZE,
41223 };
41224
41225 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
41226
41227 static int set_brk(unsigned long start, unsigned long end)
41228 {
41229 + unsigned long e = end;
41230 +
41231 start = ELF_PAGEALIGN(start);
41232 end = ELF_PAGEALIGN(end);
41233 if (end > start) {
41234 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
41235 if (BAD_ADDR(addr))
41236 return addr;
41237 }
41238 - current->mm->start_brk = current->mm->brk = end;
41239 + current->mm->start_brk = current->mm->brk = e;
41240 return 0;
41241 }
41242
41243 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41244 elf_addr_t __user *u_rand_bytes;
41245 const char *k_platform = ELF_PLATFORM;
41246 const char *k_base_platform = ELF_BASE_PLATFORM;
41247 - unsigned char k_rand_bytes[16];
41248 + u32 k_rand_bytes[4];
41249 int items;
41250 elf_addr_t *elf_info;
41251 int ei_index = 0;
41252 const struct cred *cred = current_cred();
41253 struct vm_area_struct *vma;
41254 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41255
41256 /*
41257 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41258 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41259 * Generate 16 random bytes for userspace PRNG seeding.
41260 */
41261 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41262 - u_rand_bytes = (elf_addr_t __user *)
41263 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41264 + srandom32(k_rand_bytes[0] ^ random32());
41265 + srandom32(k_rand_bytes[1] ^ random32());
41266 + srandom32(k_rand_bytes[2] ^ random32());
41267 + srandom32(k_rand_bytes[3] ^ random32());
41268 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41269 + u_rand_bytes = (elf_addr_t __user *) p;
41270 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41271 return -EFAULT;
41272
41273 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41274 return -EFAULT;
41275 current->mm->env_end = p;
41276
41277 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41278 +
41279 /* Put the elf_info on the stack in the right place. */
41280 sp = (elf_addr_t __user *)envp + 1;
41281 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41282 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41283 return -EFAULT;
41284 return 0;
41285 }
41286 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41287 {
41288 struct elf_phdr *elf_phdata;
41289 struct elf_phdr *eppnt;
41290 - unsigned long load_addr = 0;
41291 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41292 int load_addr_set = 0;
41293 unsigned long last_bss = 0, elf_bss = 0;
41294 - unsigned long error = ~0UL;
41295 + unsigned long error = -EINVAL;
41296 unsigned long total_size;
41297 int retval, i, size;
41298
41299 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41300 goto out_close;
41301 }
41302
41303 +#ifdef CONFIG_PAX_SEGMEXEC
41304 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41305 + pax_task_size = SEGMEXEC_TASK_SIZE;
41306 +#endif
41307 +
41308 eppnt = elf_phdata;
41309 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41310 if (eppnt->p_type == PT_LOAD) {
41311 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41312 k = load_addr + eppnt->p_vaddr;
41313 if (BAD_ADDR(k) ||
41314 eppnt->p_filesz > eppnt->p_memsz ||
41315 - eppnt->p_memsz > TASK_SIZE ||
41316 - TASK_SIZE - eppnt->p_memsz < k) {
41317 + eppnt->p_memsz > pax_task_size ||
41318 + pax_task_size - eppnt->p_memsz < k) {
41319 error = -ENOMEM;
41320 goto out_close;
41321 }
41322 @@ -528,6 +552,351 @@ out:
41323 return error;
41324 }
41325
41326 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41327 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41328 +{
41329 + unsigned long pax_flags = 0UL;
41330 +
41331 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41332 +
41333 +#ifdef CONFIG_PAX_PAGEEXEC
41334 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41335 + pax_flags |= MF_PAX_PAGEEXEC;
41336 +#endif
41337 +
41338 +#ifdef CONFIG_PAX_SEGMEXEC
41339 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41340 + pax_flags |= MF_PAX_SEGMEXEC;
41341 +#endif
41342 +
41343 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41344 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41345 + if ((__supported_pte_mask & _PAGE_NX))
41346 + pax_flags &= ~MF_PAX_SEGMEXEC;
41347 + else
41348 + pax_flags &= ~MF_PAX_PAGEEXEC;
41349 + }
41350 +#endif
41351 +
41352 +#ifdef CONFIG_PAX_EMUTRAMP
41353 + if (elf_phdata->p_flags & PF_EMUTRAMP)
41354 + pax_flags |= MF_PAX_EMUTRAMP;
41355 +#endif
41356 +
41357 +#ifdef CONFIG_PAX_MPROTECT
41358 + if (elf_phdata->p_flags & PF_MPROTECT)
41359 + pax_flags |= MF_PAX_MPROTECT;
41360 +#endif
41361 +
41362 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41363 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41364 + pax_flags |= MF_PAX_RANDMMAP;
41365 +#endif
41366 +
41367 +#endif
41368 +
41369 + return pax_flags;
41370 +}
41371 +
41372 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41373 +{
41374 + unsigned long pax_flags = 0UL;
41375 +
41376 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41377 +
41378 +#ifdef CONFIG_PAX_PAGEEXEC
41379 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41380 + pax_flags |= MF_PAX_PAGEEXEC;
41381 +#endif
41382 +
41383 +#ifdef CONFIG_PAX_SEGMEXEC
41384 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41385 + pax_flags |= MF_PAX_SEGMEXEC;
41386 +#endif
41387 +
41388 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41389 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41390 + if ((__supported_pte_mask & _PAGE_NX))
41391 + pax_flags &= ~MF_PAX_SEGMEXEC;
41392 + else
41393 + pax_flags &= ~MF_PAX_PAGEEXEC;
41394 + }
41395 +#endif
41396 +
41397 +#ifdef CONFIG_PAX_EMUTRAMP
41398 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41399 + pax_flags |= MF_PAX_EMUTRAMP;
41400 +#endif
41401 +
41402 +#ifdef CONFIG_PAX_MPROTECT
41403 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41404 + pax_flags |= MF_PAX_MPROTECT;
41405 +#endif
41406 +
41407 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41408 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41409 + pax_flags |= MF_PAX_RANDMMAP;
41410 +#endif
41411 +
41412 +#endif
41413 +
41414 + return pax_flags;
41415 +}
41416 +
41417 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41418 +{
41419 + unsigned long pax_flags = 0UL;
41420 +
41421 +#ifdef CONFIG_PAX_EI_PAX
41422 +
41423 +#ifdef CONFIG_PAX_PAGEEXEC
41424 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41425 + pax_flags |= MF_PAX_PAGEEXEC;
41426 +#endif
41427 +
41428 +#ifdef CONFIG_PAX_SEGMEXEC
41429 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41430 + pax_flags |= MF_PAX_SEGMEXEC;
41431 +#endif
41432 +
41433 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41434 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41435 + if ((__supported_pte_mask & _PAGE_NX))
41436 + pax_flags &= ~MF_PAX_SEGMEXEC;
41437 + else
41438 + pax_flags &= ~MF_PAX_PAGEEXEC;
41439 + }
41440 +#endif
41441 +
41442 +#ifdef CONFIG_PAX_EMUTRAMP
41443 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41444 + pax_flags |= MF_PAX_EMUTRAMP;
41445 +#endif
41446 +
41447 +#ifdef CONFIG_PAX_MPROTECT
41448 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41449 + pax_flags |= MF_PAX_MPROTECT;
41450 +#endif
41451 +
41452 +#ifdef CONFIG_PAX_ASLR
41453 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41454 + pax_flags |= MF_PAX_RANDMMAP;
41455 +#endif
41456 +
41457 +#else
41458 +
41459 +#ifdef CONFIG_PAX_PAGEEXEC
41460 + pax_flags |= MF_PAX_PAGEEXEC;
41461 +#endif
41462 +
41463 +#ifdef CONFIG_PAX_MPROTECT
41464 + pax_flags |= MF_PAX_MPROTECT;
41465 +#endif
41466 +
41467 +#ifdef CONFIG_PAX_RANDMMAP
41468 + pax_flags |= MF_PAX_RANDMMAP;
41469 +#endif
41470 +
41471 +#ifdef CONFIG_PAX_SEGMEXEC
41472 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41473 + pax_flags &= ~MF_PAX_PAGEEXEC;
41474 + pax_flags |= MF_PAX_SEGMEXEC;
41475 + }
41476 +#endif
41477 +
41478 +#endif
41479 +
41480 + return pax_flags;
41481 +}
41482 +
41483 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41484 +{
41485 +
41486 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41487 + unsigned long i;
41488 +
41489 + for (i = 0UL; i < elf_ex->e_phnum; i++)
41490 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41491 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41492 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41493 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41494 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41495 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41496 + return ~0UL;
41497 +
41498 +#ifdef CONFIG_PAX_SOFTMODE
41499 + if (pax_softmode)
41500 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41501 + else
41502 +#endif
41503 +
41504 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41505 + break;
41506 + }
41507 +#endif
41508 +
41509 + return ~0UL;
41510 +}
41511 +
41512 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41513 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41514 +{
41515 + unsigned long pax_flags = 0UL;
41516 +
41517 +#ifdef CONFIG_PAX_PAGEEXEC
41518 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41519 + pax_flags |= MF_PAX_PAGEEXEC;
41520 +#endif
41521 +
41522 +#ifdef CONFIG_PAX_SEGMEXEC
41523 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41524 + pax_flags |= MF_PAX_SEGMEXEC;
41525 +#endif
41526 +
41527 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41528 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41529 + if ((__supported_pte_mask & _PAGE_NX))
41530 + pax_flags &= ~MF_PAX_SEGMEXEC;
41531 + else
41532 + pax_flags &= ~MF_PAX_PAGEEXEC;
41533 + }
41534 +#endif
41535 +
41536 +#ifdef CONFIG_PAX_EMUTRAMP
41537 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41538 + pax_flags |= MF_PAX_EMUTRAMP;
41539 +#endif
41540 +
41541 +#ifdef CONFIG_PAX_MPROTECT
41542 + if (pax_flags_softmode & MF_PAX_MPROTECT)
41543 + pax_flags |= MF_PAX_MPROTECT;
41544 +#endif
41545 +
41546 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41547 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41548 + pax_flags |= MF_PAX_RANDMMAP;
41549 +#endif
41550 +
41551 + return pax_flags;
41552 +}
41553 +
41554 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41555 +{
41556 + unsigned long pax_flags = 0UL;
41557 +
41558 +#ifdef CONFIG_PAX_PAGEEXEC
41559 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41560 + pax_flags |= MF_PAX_PAGEEXEC;
41561 +#endif
41562 +
41563 +#ifdef CONFIG_PAX_SEGMEXEC
41564 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41565 + pax_flags |= MF_PAX_SEGMEXEC;
41566 +#endif
41567 +
41568 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41569 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41570 + if ((__supported_pte_mask & _PAGE_NX))
41571 + pax_flags &= ~MF_PAX_SEGMEXEC;
41572 + else
41573 + pax_flags &= ~MF_PAX_PAGEEXEC;
41574 + }
41575 +#endif
41576 +
41577 +#ifdef CONFIG_PAX_EMUTRAMP
41578 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41579 + pax_flags |= MF_PAX_EMUTRAMP;
41580 +#endif
41581 +
41582 +#ifdef CONFIG_PAX_MPROTECT
41583 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41584 + pax_flags |= MF_PAX_MPROTECT;
41585 +#endif
41586 +
41587 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41588 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41589 + pax_flags |= MF_PAX_RANDMMAP;
41590 +#endif
41591 +
41592 + return pax_flags;
41593 +}
41594 +#endif
41595 +
41596 +static unsigned long pax_parse_xattr_pax(struct file * const file)
41597 +{
41598 +
41599 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41600 + ssize_t xattr_size, i;
41601 + unsigned char xattr_value[5];
41602 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41603 +
41604 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41605 + if (xattr_size <= 0)
41606 + return ~0UL;
41607 +
41608 + for (i = 0; i < xattr_size; i++)
41609 + switch (xattr_value[i]) {
41610 + default:
41611 + return ~0UL;
41612 +
41613 +#define parse_flag(option1, option2, flag) \
41614 + case option1: \
41615 + pax_flags_hardmode |= MF_PAX_##flag; \
41616 + break; \
41617 + case option2: \
41618 + pax_flags_softmode |= MF_PAX_##flag; \
41619 + break;
41620 +
41621 + parse_flag('p', 'P', PAGEEXEC);
41622 + parse_flag('e', 'E', EMUTRAMP);
41623 + parse_flag('m', 'M', MPROTECT);
41624 + parse_flag('r', 'R', RANDMMAP);
41625 + parse_flag('s', 'S', SEGMEXEC);
41626 +
41627 +#undef parse_flag
41628 + }
41629 +
41630 + if (pax_flags_hardmode & pax_flags_softmode)
41631 + return ~0UL;
41632 +
41633 +#ifdef CONFIG_PAX_SOFTMODE
41634 + if (pax_softmode)
41635 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41636 + else
41637 +#endif
41638 +
41639 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41640 +#else
41641 + return ~0UL;
41642 +#endif
41643 +
41644 +}
41645 +
41646 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41647 +{
41648 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41649 +
41650 + pax_flags = pax_parse_ei_pax(elf_ex);
41651 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41652 + xattr_pax_flags = pax_parse_xattr_pax(file);
41653 +
41654 + if (pt_pax_flags == ~0UL)
41655 + pt_pax_flags = xattr_pax_flags;
41656 + else if (xattr_pax_flags == ~0UL)
41657 + xattr_pax_flags = pt_pax_flags;
41658 + if (pt_pax_flags != xattr_pax_flags)
41659 + return -EINVAL;
41660 + if (pt_pax_flags != ~0UL)
41661 + pax_flags = pt_pax_flags;
41662 +
41663 + if (0 > pax_check_flags(&pax_flags))
41664 + return -EINVAL;
41665 +
41666 + current->mm->pax_flags = pax_flags;
41667 + return 0;
41668 +}
41669 +#endif
41670 +
41671 /*
41672 * These are the functions used to load ELF style executables and shared
41673 * libraries. There is no binary dependent code anywhere else.
41674 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
41675 {
41676 unsigned int random_variable = 0;
41677
41678 +#ifdef CONFIG_PAX_RANDUSTACK
41679 + if (randomize_va_space)
41680 + return stack_top - current->mm->delta_stack;
41681 +#endif
41682 +
41683 if ((current->flags & PF_RANDOMIZE) &&
41684 !(current->personality & ADDR_NO_RANDOMIZE)) {
41685 random_variable = get_random_int() & STACK_RND_MASK;
41686 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41687 unsigned long load_addr = 0, load_bias = 0;
41688 int load_addr_set = 0;
41689 char * elf_interpreter = NULL;
41690 - unsigned long error;
41691 + unsigned long error = 0;
41692 struct elf_phdr *elf_ppnt, *elf_phdata;
41693 unsigned long elf_bss, elf_brk;
41694 int retval, i;
41695 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41696 unsigned long start_code, end_code, start_data, end_data;
41697 unsigned long reloc_func_desc __maybe_unused = 0;
41698 int executable_stack = EXSTACK_DEFAULT;
41699 - unsigned long def_flags = 0;
41700 struct {
41701 struct elfhdr elf_ex;
41702 struct elfhdr interp_elf_ex;
41703 } *loc;
41704 + unsigned long pax_task_size = TASK_SIZE;
41705
41706 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
41707 if (!loc) {
41708 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41709
41710 /* OK, This is the point of no return */
41711 current->flags &= ~PF_FORKNOEXEC;
41712 - current->mm->def_flags = def_flags;
41713 +
41714 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41715 + current->mm->pax_flags = 0UL;
41716 +#endif
41717 +
41718 +#ifdef CONFIG_PAX_DLRESOLVE
41719 + current->mm->call_dl_resolve = 0UL;
41720 +#endif
41721 +
41722 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
41723 + current->mm->call_syscall = 0UL;
41724 +#endif
41725 +
41726 +#ifdef CONFIG_PAX_ASLR
41727 + current->mm->delta_mmap = 0UL;
41728 + current->mm->delta_stack = 0UL;
41729 +#endif
41730 +
41731 + current->mm->def_flags = 0;
41732 +
41733 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41734 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
41735 + send_sig(SIGKILL, current, 0);
41736 + goto out_free_dentry;
41737 + }
41738 +#endif
41739 +
41740 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
41741 + pax_set_initial_flags(bprm);
41742 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
41743 + if (pax_set_initial_flags_func)
41744 + (pax_set_initial_flags_func)(bprm);
41745 +#endif
41746 +
41747 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
41748 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
41749 + current->mm->context.user_cs_limit = PAGE_SIZE;
41750 + current->mm->def_flags |= VM_PAGEEXEC;
41751 + }
41752 +#endif
41753 +
41754 +#ifdef CONFIG_PAX_SEGMEXEC
41755 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
41756 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
41757 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
41758 + pax_task_size = SEGMEXEC_TASK_SIZE;
41759 + current->mm->def_flags |= VM_NOHUGEPAGE;
41760 + }
41761 +#endif
41762 +
41763 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
41764 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41765 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
41766 + put_cpu();
41767 + }
41768 +#endif
41769
41770 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
41771 may depend on the personality. */
41772 SET_PERSONALITY(loc->elf_ex);
41773 +
41774 +#ifdef CONFIG_PAX_ASLR
41775 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41776 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
41777 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
41778 + }
41779 +#endif
41780 +
41781 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41782 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41783 + executable_stack = EXSTACK_DISABLE_X;
41784 + current->personality &= ~READ_IMPLIES_EXEC;
41785 + } else
41786 +#endif
41787 +
41788 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
41789 current->personality |= READ_IMPLIES_EXEC;
41790
41791 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41792 #else
41793 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
41794 #endif
41795 +
41796 +#ifdef CONFIG_PAX_RANDMMAP
41797 + /* PaX: randomize base address at the default exe base if requested */
41798 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
41799 +#ifdef CONFIG_SPARC64
41800 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
41801 +#else
41802 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
41803 +#endif
41804 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
41805 + elf_flags |= MAP_FIXED;
41806 + }
41807 +#endif
41808 +
41809 }
41810
41811 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
41812 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41813 * allowed task size. Note that p_filesz must always be
41814 * <= p_memsz so it is only necessary to check p_memsz.
41815 */
41816 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41817 - elf_ppnt->p_memsz > TASK_SIZE ||
41818 - TASK_SIZE - elf_ppnt->p_memsz < k) {
41819 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41820 + elf_ppnt->p_memsz > pax_task_size ||
41821 + pax_task_size - elf_ppnt->p_memsz < k) {
41822 /* set_brk can never work. Avoid overflows. */
41823 send_sig(SIGKILL, current, 0);
41824 retval = -EINVAL;
41825 @@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41826 start_data += load_bias;
41827 end_data += load_bias;
41828
41829 +#ifdef CONFIG_PAX_RANDMMAP
41830 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
41831 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
41832 +#endif
41833 +
41834 /* Calling set_brk effectively mmaps the pages that we need
41835 * for the bss and break sections. We must do this before
41836 * mapping in the interpreter, to make sure it doesn't wind
41837 @@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41838 goto out_free_dentry;
41839 }
41840 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
41841 - send_sig(SIGSEGV, current, 0);
41842 - retval = -EFAULT; /* Nobody gets to see this, but.. */
41843 - goto out_free_dentry;
41844 + /*
41845 + * This bss-zeroing can fail if the ELF
41846 + * file specifies odd protections. So
41847 + * we don't check the return value
41848 + */
41849 }
41850
41851 if (elf_interpreter) {
41852 @@ -1098,7 +1563,7 @@ out:
41853 * Decide what to dump of a segment, part, all or none.
41854 */
41855 static unsigned long vma_dump_size(struct vm_area_struct *vma,
41856 - unsigned long mm_flags)
41857 + unsigned long mm_flags, long signr)
41858 {
41859 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
41860
41861 @@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
41862 if (vma->vm_file == NULL)
41863 return 0;
41864
41865 - if (FILTER(MAPPED_PRIVATE))
41866 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
41867 goto whole;
41868
41869 /*
41870 @@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
41871 {
41872 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
41873 int i = 0;
41874 - do
41875 + do {
41876 i += 2;
41877 - while (auxv[i - 2] != AT_NULL);
41878 + } while (auxv[i - 2] != AT_NULL);
41879 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
41880 }
41881
41882 @@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
41883 }
41884
41885 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
41886 - unsigned long mm_flags)
41887 + struct coredump_params *cprm)
41888 {
41889 struct vm_area_struct *vma;
41890 size_t size = 0;
41891
41892 for (vma = first_vma(current, gate_vma); vma != NULL;
41893 vma = next_vma(vma, gate_vma))
41894 - size += vma_dump_size(vma, mm_flags);
41895 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41896 return size;
41897 }
41898
41899 @@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41900
41901 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
41902
41903 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
41904 + offset += elf_core_vma_data_size(gate_vma, cprm);
41905 offset += elf_core_extra_data_size();
41906 e_shoff = offset;
41907
41908 @@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
41909 offset = dataoff;
41910
41911 size += sizeof(*elf);
41912 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41913 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
41914 goto end_coredump;
41915
41916 size += sizeof(*phdr4note);
41917 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41918 if (size > cprm->limit
41919 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
41920 goto end_coredump;
41921 @@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41922 phdr.p_offset = offset;
41923 phdr.p_vaddr = vma->vm_start;
41924 phdr.p_paddr = 0;
41925 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
41926 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41927 phdr.p_memsz = vma->vm_end - vma->vm_start;
41928 offset += phdr.p_filesz;
41929 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
41930 @@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41931 phdr.p_align = ELF_EXEC_PAGESIZE;
41932
41933 size += sizeof(phdr);
41934 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41935 if (size > cprm->limit
41936 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
41937 goto end_coredump;
41938 @@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41939 unsigned long addr;
41940 unsigned long end;
41941
41942 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
41943 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41944
41945 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
41946 struct page *page;
41947 @@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41948 page = get_dump_page(addr);
41949 if (page) {
41950 void *kaddr = kmap(page);
41951 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
41952 stop = ((size += PAGE_SIZE) > cprm->limit) ||
41953 !dump_write(cprm->file, kaddr,
41954 PAGE_SIZE);
41955 @@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41956
41957 if (e_phnum == PN_XNUM) {
41958 size += sizeof(*shdr4extnum);
41959 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
41960 if (size > cprm->limit
41961 || !dump_write(cprm->file, shdr4extnum,
41962 sizeof(*shdr4extnum)))
41963 @@ -2075,6 +2545,97 @@ out:
41964
41965 #endif /* CONFIG_ELF_CORE */
41966
41967 +#ifdef CONFIG_PAX_MPROTECT
41968 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
41969 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
41970 + * we'll remove VM_MAYWRITE for good on RELRO segments.
41971 + *
41972 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
41973 + * basis because we want to allow the common case and not the special ones.
41974 + */
41975 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
41976 +{
41977 + struct elfhdr elf_h;
41978 + struct elf_phdr elf_p;
41979 + unsigned long i;
41980 + unsigned long oldflags;
41981 + bool is_textrel_rw, is_textrel_rx, is_relro;
41982 +
41983 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
41984 + return;
41985 +
41986 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
41987 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
41988 +
41989 +#ifdef CONFIG_PAX_ELFRELOCS
41990 + /* possible TEXTREL */
41991 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
41992 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
41993 +#else
41994 + is_textrel_rw = false;
41995 + is_textrel_rx = false;
41996 +#endif
41997 +
41998 + /* possible RELRO */
41999 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42000 +
42001 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42002 + return;
42003 +
42004 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42005 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42006 +
42007 +#ifdef CONFIG_PAX_ETEXECRELOCS
42008 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42009 +#else
42010 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42011 +#endif
42012 +
42013 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42014 + !elf_check_arch(&elf_h) ||
42015 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42016 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42017 + return;
42018 +
42019 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42020 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42021 + return;
42022 + switch (elf_p.p_type) {
42023 + case PT_DYNAMIC:
42024 + if (!is_textrel_rw && !is_textrel_rx)
42025 + continue;
42026 + i = 0UL;
42027 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42028 + elf_dyn dyn;
42029 +
42030 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42031 + return;
42032 + if (dyn.d_tag == DT_NULL)
42033 + return;
42034 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42035 + gr_log_textrel(vma);
42036 + if (is_textrel_rw)
42037 + vma->vm_flags |= VM_MAYWRITE;
42038 + else
42039 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42040 + vma->vm_flags &= ~VM_MAYWRITE;
42041 + return;
42042 + }
42043 + i++;
42044 + }
42045 + return;
42046 +
42047 + case PT_GNU_RELRO:
42048 + if (!is_relro)
42049 + continue;
42050 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42051 + vma->vm_flags &= ~VM_MAYWRITE;
42052 + return;
42053 + }
42054 + }
42055 +}
42056 +#endif
42057 +
42058 static int __init init_elf_binfmt(void)
42059 {
42060 return register_binfmt(&elf_format);
42061 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42062 index 1bffbe0..c8c283e 100644
42063 --- a/fs/binfmt_flat.c
42064 +++ b/fs/binfmt_flat.c
42065 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42066 realdatastart = (unsigned long) -ENOMEM;
42067 printk("Unable to allocate RAM for process data, errno %d\n",
42068 (int)-realdatastart);
42069 + down_write(&current->mm->mmap_sem);
42070 do_munmap(current->mm, textpos, text_len);
42071 + up_write(&current->mm->mmap_sem);
42072 ret = realdatastart;
42073 goto err;
42074 }
42075 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42076 }
42077 if (IS_ERR_VALUE(result)) {
42078 printk("Unable to read data+bss, errno %d\n", (int)-result);
42079 + down_write(&current->mm->mmap_sem);
42080 do_munmap(current->mm, textpos, text_len);
42081 do_munmap(current->mm, realdatastart, len);
42082 + up_write(&current->mm->mmap_sem);
42083 ret = result;
42084 goto err;
42085 }
42086 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42087 }
42088 if (IS_ERR_VALUE(result)) {
42089 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42090 + down_write(&current->mm->mmap_sem);
42091 do_munmap(current->mm, textpos, text_len + data_len + extra +
42092 MAX_SHARED_LIBS * sizeof(unsigned long));
42093 + up_write(&current->mm->mmap_sem);
42094 ret = result;
42095 goto err;
42096 }
42097 diff --git a/fs/bio.c b/fs/bio.c
42098 index b1fe82c..84da0a9 100644
42099 --- a/fs/bio.c
42100 +++ b/fs/bio.c
42101 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42102 const int read = bio_data_dir(bio) == READ;
42103 struct bio_map_data *bmd = bio->bi_private;
42104 int i;
42105 - char *p = bmd->sgvecs[0].iov_base;
42106 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42107
42108 __bio_for_each_segment(bvec, bio, i, 0) {
42109 char *addr = page_address(bvec->bv_page);
42110 diff --git a/fs/block_dev.c b/fs/block_dev.c
42111 index b07f1da..9efcb92 100644
42112 --- a/fs/block_dev.c
42113 +++ b/fs/block_dev.c
42114 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42115 else if (bdev->bd_contains == bdev)
42116 return true; /* is a whole device which isn't held */
42117
42118 - else if (whole->bd_holder == bd_may_claim)
42119 + else if (whole->bd_holder == (void *)bd_may_claim)
42120 return true; /* is a partition of a device that is being partitioned */
42121 else if (whole->bd_holder != NULL)
42122 return false; /* is a partition of a held device */
42123 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42124 index dede441..f2a2507 100644
42125 --- a/fs/btrfs/ctree.c
42126 +++ b/fs/btrfs/ctree.c
42127 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42128 free_extent_buffer(buf);
42129 add_root_to_dirty_list(root);
42130 } else {
42131 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42132 - parent_start = parent->start;
42133 - else
42134 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42135 + if (parent)
42136 + parent_start = parent->start;
42137 + else
42138 + parent_start = 0;
42139 + } else
42140 parent_start = 0;
42141
42142 WARN_ON(trans->transid != btrfs_header_generation(parent));
42143 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42144 index fd1a06d..6e9033d 100644
42145 --- a/fs/btrfs/inode.c
42146 +++ b/fs/btrfs/inode.c
42147 @@ -6895,7 +6895,7 @@ fail:
42148 return -ENOMEM;
42149 }
42150
42151 -static int btrfs_getattr(struct vfsmount *mnt,
42152 +int btrfs_getattr(struct vfsmount *mnt,
42153 struct dentry *dentry, struct kstat *stat)
42154 {
42155 struct inode *inode = dentry->d_inode;
42156 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42157 return 0;
42158 }
42159
42160 +EXPORT_SYMBOL(btrfs_getattr);
42161 +
42162 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42163 +{
42164 + return BTRFS_I(inode)->root->anon_dev;
42165 +}
42166 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42167 +
42168 /*
42169 * If a file is moved, it will inherit the cow and compression flags of the new
42170 * directory.
42171 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42172 index c04f02c..f5c9e2e 100644
42173 --- a/fs/btrfs/ioctl.c
42174 +++ b/fs/btrfs/ioctl.c
42175 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42176 for (i = 0; i < num_types; i++) {
42177 struct btrfs_space_info *tmp;
42178
42179 + /* Don't copy in more than we allocated */
42180 if (!slot_count)
42181 break;
42182
42183 + slot_count--;
42184 +
42185 info = NULL;
42186 rcu_read_lock();
42187 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42188 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42189 memcpy(dest, &space, sizeof(space));
42190 dest++;
42191 space_args.total_spaces++;
42192 - slot_count--;
42193 }
42194 - if (!slot_count)
42195 - break;
42196 }
42197 up_read(&info->groups_sem);
42198 }
42199
42200 - user_dest = (struct btrfs_ioctl_space_info *)
42201 + user_dest = (struct btrfs_ioctl_space_info __user *)
42202 (arg + sizeof(struct btrfs_ioctl_space_args));
42203
42204 if (copy_to_user(user_dest, dest_orig, alloc_size))
42205 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42206 index cfb5543..1ae7347 100644
42207 --- a/fs/btrfs/relocation.c
42208 +++ b/fs/btrfs/relocation.c
42209 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42210 }
42211 spin_unlock(&rc->reloc_root_tree.lock);
42212
42213 - BUG_ON((struct btrfs_root *)node->data != root);
42214 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42215
42216 if (!del) {
42217 spin_lock(&rc->reloc_root_tree.lock);
42218 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42219 index 622f469..e8d2d55 100644
42220 --- a/fs/cachefiles/bind.c
42221 +++ b/fs/cachefiles/bind.c
42222 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42223 args);
42224
42225 /* start by checking things over */
42226 - ASSERT(cache->fstop_percent >= 0 &&
42227 - cache->fstop_percent < cache->fcull_percent &&
42228 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42229 cache->fcull_percent < cache->frun_percent &&
42230 cache->frun_percent < 100);
42231
42232 - ASSERT(cache->bstop_percent >= 0 &&
42233 - cache->bstop_percent < cache->bcull_percent &&
42234 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42235 cache->bcull_percent < cache->brun_percent &&
42236 cache->brun_percent < 100);
42237
42238 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42239 index 0a1467b..6a53245 100644
42240 --- a/fs/cachefiles/daemon.c
42241 +++ b/fs/cachefiles/daemon.c
42242 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42243 if (n > buflen)
42244 return -EMSGSIZE;
42245
42246 - if (copy_to_user(_buffer, buffer, n) != 0)
42247 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42248 return -EFAULT;
42249
42250 return n;
42251 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42252 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42253 return -EIO;
42254
42255 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42256 + if (datalen > PAGE_SIZE - 1)
42257 return -EOPNOTSUPP;
42258
42259 /* drag the command string into the kernel so we can parse it */
42260 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42261 if (args[0] != '%' || args[1] != '\0')
42262 return -EINVAL;
42263
42264 - if (fstop < 0 || fstop >= cache->fcull_percent)
42265 + if (fstop >= cache->fcull_percent)
42266 return cachefiles_daemon_range_error(cache, args);
42267
42268 cache->fstop_percent = fstop;
42269 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42270 if (args[0] != '%' || args[1] != '\0')
42271 return -EINVAL;
42272
42273 - if (bstop < 0 || bstop >= cache->bcull_percent)
42274 + if (bstop >= cache->bcull_percent)
42275 return cachefiles_daemon_range_error(cache, args);
42276
42277 cache->bstop_percent = bstop;
42278 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42279 index bd6bc1b..b627b53 100644
42280 --- a/fs/cachefiles/internal.h
42281 +++ b/fs/cachefiles/internal.h
42282 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42283 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42284 struct rb_root active_nodes; /* active nodes (can't be culled) */
42285 rwlock_t active_lock; /* lock for active_nodes */
42286 - atomic_t gravecounter; /* graveyard uniquifier */
42287 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42288 unsigned frun_percent; /* when to stop culling (% files) */
42289 unsigned fcull_percent; /* when to start culling (% files) */
42290 unsigned fstop_percent; /* when to stop allocating (% files) */
42291 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42292 * proc.c
42293 */
42294 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42295 -extern atomic_t cachefiles_lookup_histogram[HZ];
42296 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42297 -extern atomic_t cachefiles_create_histogram[HZ];
42298 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42299 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42300 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42301
42302 extern int __init cachefiles_proc_init(void);
42303 extern void cachefiles_proc_cleanup(void);
42304 static inline
42305 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42306 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42307 {
42308 unsigned long jif = jiffies - start_jif;
42309 if (jif >= HZ)
42310 jif = HZ - 1;
42311 - atomic_inc(&histogram[jif]);
42312 + atomic_inc_unchecked(&histogram[jif]);
42313 }
42314
42315 #else
42316 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42317 index a0358c2..d6137f2 100644
42318 --- a/fs/cachefiles/namei.c
42319 +++ b/fs/cachefiles/namei.c
42320 @@ -318,7 +318,7 @@ try_again:
42321 /* first step is to make up a grave dentry in the graveyard */
42322 sprintf(nbuffer, "%08x%08x",
42323 (uint32_t) get_seconds(),
42324 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42325 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42326
42327 /* do the multiway lock magic */
42328 trap = lock_rename(cache->graveyard, dir);
42329 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42330 index eccd339..4c1d995 100644
42331 --- a/fs/cachefiles/proc.c
42332 +++ b/fs/cachefiles/proc.c
42333 @@ -14,9 +14,9 @@
42334 #include <linux/seq_file.h>
42335 #include "internal.h"
42336
42337 -atomic_t cachefiles_lookup_histogram[HZ];
42338 -atomic_t cachefiles_mkdir_histogram[HZ];
42339 -atomic_t cachefiles_create_histogram[HZ];
42340 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42341 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42342 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42343
42344 /*
42345 * display the latency histogram
42346 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42347 return 0;
42348 default:
42349 index = (unsigned long) v - 3;
42350 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42351 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42352 - z = atomic_read(&cachefiles_create_histogram[index]);
42353 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42354 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42355 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42356 if (x == 0 && y == 0 && z == 0)
42357 return 0;
42358
42359 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42360 index 0e3c092..818480e 100644
42361 --- a/fs/cachefiles/rdwr.c
42362 +++ b/fs/cachefiles/rdwr.c
42363 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42364 old_fs = get_fs();
42365 set_fs(KERNEL_DS);
42366 ret = file->f_op->write(
42367 - file, (const void __user *) data, len, &pos);
42368 + file, (const void __force_user *) data, len, &pos);
42369 set_fs(old_fs);
42370 kunmap(page);
42371 if (ret != len)
42372 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42373 index 9895400..fa40a7d 100644
42374 --- a/fs/ceph/dir.c
42375 +++ b/fs/ceph/dir.c
42376 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42377 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42378 struct ceph_mds_client *mdsc = fsc->mdsc;
42379 unsigned frag = fpos_frag(filp->f_pos);
42380 - int off = fpos_off(filp->f_pos);
42381 + unsigned int off = fpos_off(filp->f_pos);
42382 int err;
42383 u32 ftype;
42384 struct ceph_mds_reply_info_parsed *rinfo;
42385 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42386 index 84e8c07..6170d31 100644
42387 --- a/fs/cifs/cifs_debug.c
42388 +++ b/fs/cifs/cifs_debug.c
42389 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42390
42391 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42392 #ifdef CONFIG_CIFS_STATS2
42393 - atomic_set(&totBufAllocCount, 0);
42394 - atomic_set(&totSmBufAllocCount, 0);
42395 + atomic_set_unchecked(&totBufAllocCount, 0);
42396 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42397 #endif /* CONFIG_CIFS_STATS2 */
42398 spin_lock(&cifs_tcp_ses_lock);
42399 list_for_each(tmp1, &cifs_tcp_ses_list) {
42400 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42401 tcon = list_entry(tmp3,
42402 struct cifs_tcon,
42403 tcon_list);
42404 - atomic_set(&tcon->num_smbs_sent, 0);
42405 - atomic_set(&tcon->num_writes, 0);
42406 - atomic_set(&tcon->num_reads, 0);
42407 - atomic_set(&tcon->num_oplock_brks, 0);
42408 - atomic_set(&tcon->num_opens, 0);
42409 - atomic_set(&tcon->num_posixopens, 0);
42410 - atomic_set(&tcon->num_posixmkdirs, 0);
42411 - atomic_set(&tcon->num_closes, 0);
42412 - atomic_set(&tcon->num_deletes, 0);
42413 - atomic_set(&tcon->num_mkdirs, 0);
42414 - atomic_set(&tcon->num_rmdirs, 0);
42415 - atomic_set(&tcon->num_renames, 0);
42416 - atomic_set(&tcon->num_t2renames, 0);
42417 - atomic_set(&tcon->num_ffirst, 0);
42418 - atomic_set(&tcon->num_fnext, 0);
42419 - atomic_set(&tcon->num_fclose, 0);
42420 - atomic_set(&tcon->num_hardlinks, 0);
42421 - atomic_set(&tcon->num_symlinks, 0);
42422 - atomic_set(&tcon->num_locks, 0);
42423 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42424 + atomic_set_unchecked(&tcon->num_writes, 0);
42425 + atomic_set_unchecked(&tcon->num_reads, 0);
42426 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42427 + atomic_set_unchecked(&tcon->num_opens, 0);
42428 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42429 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42430 + atomic_set_unchecked(&tcon->num_closes, 0);
42431 + atomic_set_unchecked(&tcon->num_deletes, 0);
42432 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42433 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42434 + atomic_set_unchecked(&tcon->num_renames, 0);
42435 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42436 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42437 + atomic_set_unchecked(&tcon->num_fnext, 0);
42438 + atomic_set_unchecked(&tcon->num_fclose, 0);
42439 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42440 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42441 + atomic_set_unchecked(&tcon->num_locks, 0);
42442 }
42443 }
42444 }
42445 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42446 smBufAllocCount.counter, cifs_min_small);
42447 #ifdef CONFIG_CIFS_STATS2
42448 seq_printf(m, "Total Large %d Small %d Allocations\n",
42449 - atomic_read(&totBufAllocCount),
42450 - atomic_read(&totSmBufAllocCount));
42451 + atomic_read_unchecked(&totBufAllocCount),
42452 + atomic_read_unchecked(&totSmBufAllocCount));
42453 #endif /* CONFIG_CIFS_STATS2 */
42454
42455 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42456 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42457 if (tcon->need_reconnect)
42458 seq_puts(m, "\tDISCONNECTED ");
42459 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42460 - atomic_read(&tcon->num_smbs_sent),
42461 - atomic_read(&tcon->num_oplock_brks));
42462 + atomic_read_unchecked(&tcon->num_smbs_sent),
42463 + atomic_read_unchecked(&tcon->num_oplock_brks));
42464 seq_printf(m, "\nReads: %d Bytes: %lld",
42465 - atomic_read(&tcon->num_reads),
42466 + atomic_read_unchecked(&tcon->num_reads),
42467 (long long)(tcon->bytes_read));
42468 seq_printf(m, "\nWrites: %d Bytes: %lld",
42469 - atomic_read(&tcon->num_writes),
42470 + atomic_read_unchecked(&tcon->num_writes),
42471 (long long)(tcon->bytes_written));
42472 seq_printf(m, "\nFlushes: %d",
42473 - atomic_read(&tcon->num_flushes));
42474 + atomic_read_unchecked(&tcon->num_flushes));
42475 seq_printf(m, "\nLocks: %d HardLinks: %d "
42476 "Symlinks: %d",
42477 - atomic_read(&tcon->num_locks),
42478 - atomic_read(&tcon->num_hardlinks),
42479 - atomic_read(&tcon->num_symlinks));
42480 + atomic_read_unchecked(&tcon->num_locks),
42481 + atomic_read_unchecked(&tcon->num_hardlinks),
42482 + atomic_read_unchecked(&tcon->num_symlinks));
42483 seq_printf(m, "\nOpens: %d Closes: %d "
42484 "Deletes: %d",
42485 - atomic_read(&tcon->num_opens),
42486 - atomic_read(&tcon->num_closes),
42487 - atomic_read(&tcon->num_deletes));
42488 + atomic_read_unchecked(&tcon->num_opens),
42489 + atomic_read_unchecked(&tcon->num_closes),
42490 + atomic_read_unchecked(&tcon->num_deletes));
42491 seq_printf(m, "\nPosix Opens: %d "
42492 "Posix Mkdirs: %d",
42493 - atomic_read(&tcon->num_posixopens),
42494 - atomic_read(&tcon->num_posixmkdirs));
42495 + atomic_read_unchecked(&tcon->num_posixopens),
42496 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42497 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42498 - atomic_read(&tcon->num_mkdirs),
42499 - atomic_read(&tcon->num_rmdirs));
42500 + atomic_read_unchecked(&tcon->num_mkdirs),
42501 + atomic_read_unchecked(&tcon->num_rmdirs));
42502 seq_printf(m, "\nRenames: %d T2 Renames %d",
42503 - atomic_read(&tcon->num_renames),
42504 - atomic_read(&tcon->num_t2renames));
42505 + atomic_read_unchecked(&tcon->num_renames),
42506 + atomic_read_unchecked(&tcon->num_t2renames));
42507 seq_printf(m, "\nFindFirst: %d FNext %d "
42508 "FClose %d",
42509 - atomic_read(&tcon->num_ffirst),
42510 - atomic_read(&tcon->num_fnext),
42511 - atomic_read(&tcon->num_fclose));
42512 + atomic_read_unchecked(&tcon->num_ffirst),
42513 + atomic_read_unchecked(&tcon->num_fnext),
42514 + atomic_read_unchecked(&tcon->num_fclose));
42515 }
42516 }
42517 }
42518 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42519 index 8f1fe32..38f9e27 100644
42520 --- a/fs/cifs/cifsfs.c
42521 +++ b/fs/cifs/cifsfs.c
42522 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
42523 cifs_req_cachep = kmem_cache_create("cifs_request",
42524 CIFSMaxBufSize +
42525 MAX_CIFS_HDR_SIZE, 0,
42526 - SLAB_HWCACHE_ALIGN, NULL);
42527 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42528 if (cifs_req_cachep == NULL)
42529 return -ENOMEM;
42530
42531 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
42532 efficient to alloc 1 per page off the slab compared to 17K (5page)
42533 alloc of large cifs buffers even when page debugging is on */
42534 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42535 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42536 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42537 NULL);
42538 if (cifs_sm_req_cachep == NULL) {
42539 mempool_destroy(cifs_req_poolp);
42540 @@ -1101,8 +1101,8 @@ init_cifs(void)
42541 atomic_set(&bufAllocCount, 0);
42542 atomic_set(&smBufAllocCount, 0);
42543 #ifdef CONFIG_CIFS_STATS2
42544 - atomic_set(&totBufAllocCount, 0);
42545 - atomic_set(&totSmBufAllocCount, 0);
42546 + atomic_set_unchecked(&totBufAllocCount, 0);
42547 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42548 #endif /* CONFIG_CIFS_STATS2 */
42549
42550 atomic_set(&midCount, 0);
42551 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42552 index 8238aa1..0347196 100644
42553 --- a/fs/cifs/cifsglob.h
42554 +++ b/fs/cifs/cifsglob.h
42555 @@ -392,28 +392,28 @@ struct cifs_tcon {
42556 __u16 Flags; /* optional support bits */
42557 enum statusEnum tidStatus;
42558 #ifdef CONFIG_CIFS_STATS
42559 - atomic_t num_smbs_sent;
42560 - atomic_t num_writes;
42561 - atomic_t num_reads;
42562 - atomic_t num_flushes;
42563 - atomic_t num_oplock_brks;
42564 - atomic_t num_opens;
42565 - atomic_t num_closes;
42566 - atomic_t num_deletes;
42567 - atomic_t num_mkdirs;
42568 - atomic_t num_posixopens;
42569 - atomic_t num_posixmkdirs;
42570 - atomic_t num_rmdirs;
42571 - atomic_t num_renames;
42572 - atomic_t num_t2renames;
42573 - atomic_t num_ffirst;
42574 - atomic_t num_fnext;
42575 - atomic_t num_fclose;
42576 - atomic_t num_hardlinks;
42577 - atomic_t num_symlinks;
42578 - atomic_t num_locks;
42579 - atomic_t num_acl_get;
42580 - atomic_t num_acl_set;
42581 + atomic_unchecked_t num_smbs_sent;
42582 + atomic_unchecked_t num_writes;
42583 + atomic_unchecked_t num_reads;
42584 + atomic_unchecked_t num_flushes;
42585 + atomic_unchecked_t num_oplock_brks;
42586 + atomic_unchecked_t num_opens;
42587 + atomic_unchecked_t num_closes;
42588 + atomic_unchecked_t num_deletes;
42589 + atomic_unchecked_t num_mkdirs;
42590 + atomic_unchecked_t num_posixopens;
42591 + atomic_unchecked_t num_posixmkdirs;
42592 + atomic_unchecked_t num_rmdirs;
42593 + atomic_unchecked_t num_renames;
42594 + atomic_unchecked_t num_t2renames;
42595 + atomic_unchecked_t num_ffirst;
42596 + atomic_unchecked_t num_fnext;
42597 + atomic_unchecked_t num_fclose;
42598 + atomic_unchecked_t num_hardlinks;
42599 + atomic_unchecked_t num_symlinks;
42600 + atomic_unchecked_t num_locks;
42601 + atomic_unchecked_t num_acl_get;
42602 + atomic_unchecked_t num_acl_set;
42603 #ifdef CONFIG_CIFS_STATS2
42604 unsigned long long time_writes;
42605 unsigned long long time_reads;
42606 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
42607 }
42608
42609 #ifdef CONFIG_CIFS_STATS
42610 -#define cifs_stats_inc atomic_inc
42611 +#define cifs_stats_inc atomic_inc_unchecked
42612
42613 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42614 unsigned int bytes)
42615 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42616 /* Various Debug counters */
42617 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42618 #ifdef CONFIG_CIFS_STATS2
42619 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42620 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42621 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42622 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42623 #endif
42624 GLOBAL_EXTERN atomic_t smBufAllocCount;
42625 GLOBAL_EXTERN atomic_t midCount;
42626 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42627 index 6b0e064..94e6c3c 100644
42628 --- a/fs/cifs/link.c
42629 +++ b/fs/cifs/link.c
42630 @@ -600,7 +600,7 @@ symlink_exit:
42631
42632 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42633 {
42634 - char *p = nd_get_link(nd);
42635 + const char *p = nd_get_link(nd);
42636 if (!IS_ERR(p))
42637 kfree(p);
42638 }
42639 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
42640 index 703ef5c..2a44ed5 100644
42641 --- a/fs/cifs/misc.c
42642 +++ b/fs/cifs/misc.c
42643 @@ -156,7 +156,7 @@ cifs_buf_get(void)
42644 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
42645 atomic_inc(&bufAllocCount);
42646 #ifdef CONFIG_CIFS_STATS2
42647 - atomic_inc(&totBufAllocCount);
42648 + atomic_inc_unchecked(&totBufAllocCount);
42649 #endif /* CONFIG_CIFS_STATS2 */
42650 }
42651
42652 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
42653 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
42654 atomic_inc(&smBufAllocCount);
42655 #ifdef CONFIG_CIFS_STATS2
42656 - atomic_inc(&totSmBufAllocCount);
42657 + atomic_inc_unchecked(&totSmBufAllocCount);
42658 #endif /* CONFIG_CIFS_STATS2 */
42659
42660 }
42661 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
42662 index 6901578..d402eb5 100644
42663 --- a/fs/coda/cache.c
42664 +++ b/fs/coda/cache.c
42665 @@ -24,7 +24,7 @@
42666 #include "coda_linux.h"
42667 #include "coda_cache.h"
42668
42669 -static atomic_t permission_epoch = ATOMIC_INIT(0);
42670 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
42671
42672 /* replace or extend an acl cache hit */
42673 void coda_cache_enter(struct inode *inode, int mask)
42674 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
42675 struct coda_inode_info *cii = ITOC(inode);
42676
42677 spin_lock(&cii->c_lock);
42678 - cii->c_cached_epoch = atomic_read(&permission_epoch);
42679 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
42680 if (cii->c_uid != current_fsuid()) {
42681 cii->c_uid = current_fsuid();
42682 cii->c_cached_perm = mask;
42683 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
42684 {
42685 struct coda_inode_info *cii = ITOC(inode);
42686 spin_lock(&cii->c_lock);
42687 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
42688 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
42689 spin_unlock(&cii->c_lock);
42690 }
42691
42692 /* remove all acl caches */
42693 void coda_cache_clear_all(struct super_block *sb)
42694 {
42695 - atomic_inc(&permission_epoch);
42696 + atomic_inc_unchecked(&permission_epoch);
42697 }
42698
42699
42700 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
42701 spin_lock(&cii->c_lock);
42702 hit = (mask & cii->c_cached_perm) == mask &&
42703 cii->c_uid == current_fsuid() &&
42704 - cii->c_cached_epoch == atomic_read(&permission_epoch);
42705 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
42706 spin_unlock(&cii->c_lock);
42707
42708 return hit;
42709 diff --git a/fs/compat.c b/fs/compat.c
42710 index c987875..08771ca 100644
42711 --- a/fs/compat.c
42712 +++ b/fs/compat.c
42713 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
42714 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
42715 {
42716 compat_ino_t ino = stat->ino;
42717 - typeof(ubuf->st_uid) uid = 0;
42718 - typeof(ubuf->st_gid) gid = 0;
42719 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
42720 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
42721 int err;
42722
42723 SET_UID(uid, stat->uid);
42724 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
42725
42726 set_fs(KERNEL_DS);
42727 /* The __user pointer cast is valid because of the set_fs() */
42728 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
42729 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
42730 set_fs(oldfs);
42731 /* truncating is ok because it's a user address */
42732 if (!ret)
42733 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
42734 goto out;
42735
42736 ret = -EINVAL;
42737 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
42738 + if (nr_segs > UIO_MAXIOV)
42739 goto out;
42740 if (nr_segs > fast_segs) {
42741 ret = -ENOMEM;
42742 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
42743
42744 struct compat_readdir_callback {
42745 struct compat_old_linux_dirent __user *dirent;
42746 + struct file * file;
42747 int result;
42748 };
42749
42750 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
42751 buf->result = -EOVERFLOW;
42752 return -EOVERFLOW;
42753 }
42754 +
42755 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42756 + return 0;
42757 +
42758 buf->result++;
42759 dirent = buf->dirent;
42760 if (!access_ok(VERIFY_WRITE, dirent,
42761 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
42762
42763 buf.result = 0;
42764 buf.dirent = dirent;
42765 + buf.file = file;
42766
42767 error = vfs_readdir(file, compat_fillonedir, &buf);
42768 if (buf.result)
42769 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
42770 struct compat_getdents_callback {
42771 struct compat_linux_dirent __user *current_dir;
42772 struct compat_linux_dirent __user *previous;
42773 + struct file * file;
42774 int count;
42775 int error;
42776 };
42777 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
42778 buf->error = -EOVERFLOW;
42779 return -EOVERFLOW;
42780 }
42781 +
42782 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42783 + return 0;
42784 +
42785 dirent = buf->previous;
42786 if (dirent) {
42787 if (__put_user(offset, &dirent->d_off))
42788 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
42789 buf.previous = NULL;
42790 buf.count = count;
42791 buf.error = 0;
42792 + buf.file = file;
42793
42794 error = vfs_readdir(file, compat_filldir, &buf);
42795 if (error >= 0)
42796 @@ -1003,6 +1015,7 @@ out:
42797 struct compat_getdents_callback64 {
42798 struct linux_dirent64 __user *current_dir;
42799 struct linux_dirent64 __user *previous;
42800 + struct file * file;
42801 int count;
42802 int error;
42803 };
42804 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
42805 buf->error = -EINVAL; /* only used if we fail.. */
42806 if (reclen > buf->count)
42807 return -EINVAL;
42808 +
42809 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42810 + return 0;
42811 +
42812 dirent = buf->previous;
42813
42814 if (dirent) {
42815 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
42816 buf.previous = NULL;
42817 buf.count = count;
42818 buf.error = 0;
42819 + buf.file = file;
42820
42821 error = vfs_readdir(file, compat_filldir64, &buf);
42822 if (error >= 0)
42823 error = buf.error;
42824 lastdirent = buf.previous;
42825 if (lastdirent) {
42826 - typeof(lastdirent->d_off) d_off = file->f_pos;
42827 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
42828 if (__put_user_unaligned(d_off, &lastdirent->d_off))
42829 error = -EFAULT;
42830 else
42831 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
42832 index 112e45a..b59845b 100644
42833 --- a/fs/compat_binfmt_elf.c
42834 +++ b/fs/compat_binfmt_elf.c
42835 @@ -30,11 +30,13 @@
42836 #undef elf_phdr
42837 #undef elf_shdr
42838 #undef elf_note
42839 +#undef elf_dyn
42840 #undef elf_addr_t
42841 #define elfhdr elf32_hdr
42842 #define elf_phdr elf32_phdr
42843 #define elf_shdr elf32_shdr
42844 #define elf_note elf32_note
42845 +#define elf_dyn Elf32_Dyn
42846 #define elf_addr_t Elf32_Addr
42847
42848 /*
42849 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
42850 index 51352de..93292ff 100644
42851 --- a/fs/compat_ioctl.c
42852 +++ b/fs/compat_ioctl.c
42853 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
42854
42855 err = get_user(palp, &up->palette);
42856 err |= get_user(length, &up->length);
42857 + if (err)
42858 + return -EFAULT;
42859
42860 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
42861 err = put_user(compat_ptr(palp), &up_native->palette);
42862 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
42863 return -EFAULT;
42864 if (__get_user(udata, &ss32->iomem_base))
42865 return -EFAULT;
42866 - ss.iomem_base = compat_ptr(udata);
42867 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
42868 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
42869 __get_user(ss.port_high, &ss32->port_high))
42870 return -EFAULT;
42871 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
42872 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
42873 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
42874 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
42875 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42876 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42877 return -EFAULT;
42878
42879 return ioctl_preallocate(file, p);
42880 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
42881 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
42882 {
42883 unsigned int a, b;
42884 - a = *(unsigned int *)p;
42885 - b = *(unsigned int *)q;
42886 + a = *(const unsigned int *)p;
42887 + b = *(const unsigned int *)q;
42888 if (a > b)
42889 return 1;
42890 if (a < b)
42891 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
42892 index 9a37a9b..35792b6 100644
42893 --- a/fs/configfs/dir.c
42894 +++ b/fs/configfs/dir.c
42895 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42896 }
42897 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
42898 struct configfs_dirent *next;
42899 - const char * name;
42900 + const unsigned char * name;
42901 + char d_name[sizeof(next->s_dentry->d_iname)];
42902 int len;
42903 struct inode *inode = NULL;
42904
42905 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42906 continue;
42907
42908 name = configfs_get_name(next);
42909 - len = strlen(name);
42910 + if (next->s_dentry && name == next->s_dentry->d_iname) {
42911 + len = next->s_dentry->d_name.len;
42912 + memcpy(d_name, name, len);
42913 + name = d_name;
42914 + } else
42915 + len = strlen(name);
42916
42917 /*
42918 * We'll have a dentry and an inode for
42919 diff --git a/fs/dcache.c b/fs/dcache.c
42920 index f7908ae..920a680 100644
42921 --- a/fs/dcache.c
42922 +++ b/fs/dcache.c
42923 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
42924 mempages -= reserve;
42925
42926 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
42927 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
42928 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
42929
42930 dcache_init();
42931 inode_init();
42932 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
42933 index f3a257d..715ac0f 100644
42934 --- a/fs/debugfs/inode.c
42935 +++ b/fs/debugfs/inode.c
42936 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
42937 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
42938 {
42939 return debugfs_create_file(name,
42940 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
42941 + S_IFDIR | S_IRWXU,
42942 +#else
42943 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
42944 +#endif
42945 parent, NULL, NULL);
42946 }
42947 EXPORT_SYMBOL_GPL(debugfs_create_dir);
42948 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
42949 index af11098..81e3bbe 100644
42950 --- a/fs/ecryptfs/inode.c
42951 +++ b/fs/ecryptfs/inode.c
42952 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
42953 old_fs = get_fs();
42954 set_fs(get_ds());
42955 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
42956 - (char __user *)lower_buf,
42957 + (char __force_user *)lower_buf,
42958 lower_bufsiz);
42959 set_fs(old_fs);
42960 if (rc < 0)
42961 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42962 }
42963 old_fs = get_fs();
42964 set_fs(get_ds());
42965 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
42966 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
42967 set_fs(old_fs);
42968 if (rc < 0) {
42969 kfree(buf);
42970 @@ -752,7 +752,7 @@ out:
42971 static void
42972 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
42973 {
42974 - char *buf = nd_get_link(nd);
42975 + const char *buf = nd_get_link(nd);
42976 if (!IS_ERR(buf)) {
42977 /* Free the char* */
42978 kfree(buf);
42979 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
42980 index 0dc5a3d..d3cdeea 100644
42981 --- a/fs/ecryptfs/miscdev.c
42982 +++ b/fs/ecryptfs/miscdev.c
42983 @@ -328,7 +328,7 @@ check_list:
42984 goto out_unlock_msg_ctx;
42985 i = 5;
42986 if (msg_ctx->msg) {
42987 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
42988 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
42989 goto out_unlock_msg_ctx;
42990 i += packet_length_size;
42991 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
42992 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
42993 index 608c1c3..7d040a8 100644
42994 --- a/fs/ecryptfs/read_write.c
42995 +++ b/fs/ecryptfs/read_write.c
42996 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
42997 return -EIO;
42998 fs_save = get_fs();
42999 set_fs(get_ds());
43000 - rc = vfs_write(lower_file, data, size, &offset);
43001 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43002 set_fs(fs_save);
43003 mark_inode_dirty_sync(ecryptfs_inode);
43004 return rc;
43005 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43006 return -EIO;
43007 fs_save = get_fs();
43008 set_fs(get_ds());
43009 - rc = vfs_read(lower_file, data, size, &offset);
43010 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43011 set_fs(fs_save);
43012 return rc;
43013 }
43014 diff --git a/fs/exec.c b/fs/exec.c
43015 index 3625464..cdeecdb 100644
43016 --- a/fs/exec.c
43017 +++ b/fs/exec.c
43018 @@ -55,12 +55,28 @@
43019 #include <linux/pipe_fs_i.h>
43020 #include <linux/oom.h>
43021 #include <linux/compat.h>
43022 +#include <linux/random.h>
43023 +#include <linux/seq_file.h>
43024 +
43025 +#ifdef CONFIG_PAX_REFCOUNT
43026 +#include <linux/kallsyms.h>
43027 +#include <linux/kdebug.h>
43028 +#endif
43029
43030 #include <asm/uaccess.h>
43031 #include <asm/mmu_context.h>
43032 #include <asm/tlb.h>
43033 #include "internal.h"
43034
43035 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
43036 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
43037 +#endif
43038 +
43039 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43040 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43041 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43042 +#endif
43043 +
43044 int core_uses_pid;
43045 char core_pattern[CORENAME_MAX_SIZE] = "core";
43046 unsigned int core_pipe_limit;
43047 @@ -70,7 +86,7 @@ struct core_name {
43048 char *corename;
43049 int used, size;
43050 };
43051 -static atomic_t call_count = ATOMIC_INIT(1);
43052 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43053
43054 /* The maximal length of core_pattern is also specified in sysctl.c */
43055
43056 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43057 int write)
43058 {
43059 struct page *page;
43060 - int ret;
43061
43062 -#ifdef CONFIG_STACK_GROWSUP
43063 - if (write) {
43064 - ret = expand_downwards(bprm->vma, pos);
43065 - if (ret < 0)
43066 - return NULL;
43067 - }
43068 -#endif
43069 - ret = get_user_pages(current, bprm->mm, pos,
43070 - 1, write, 1, &page, NULL);
43071 - if (ret <= 0)
43072 + if (0 > expand_downwards(bprm->vma, pos))
43073 + return NULL;
43074 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43075 return NULL;
43076
43077 if (write) {
43078 @@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43079 if (size <= ARG_MAX)
43080 return page;
43081
43082 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43083 + // only allow 512KB for argv+env on suid/sgid binaries
43084 + // to prevent easy ASLR exhaustion
43085 + if (((bprm->cred->euid != current_euid()) ||
43086 + (bprm->cred->egid != current_egid())) &&
43087 + (size > (512 * 1024))) {
43088 + put_page(page);
43089 + return NULL;
43090 + }
43091 +#endif
43092 +
43093 /*
43094 * Limit to 1/4-th the stack size for the argv+env strings.
43095 * This ensures that:
43096 @@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43097 vma->vm_end = STACK_TOP_MAX;
43098 vma->vm_start = vma->vm_end - PAGE_SIZE;
43099 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43100 +
43101 +#ifdef CONFIG_PAX_SEGMEXEC
43102 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43103 +#endif
43104 +
43105 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43106 INIT_LIST_HEAD(&vma->anon_vma_chain);
43107
43108 @@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43109 mm->stack_vm = mm->total_vm = 1;
43110 up_write(&mm->mmap_sem);
43111 bprm->p = vma->vm_end - sizeof(void *);
43112 +
43113 +#ifdef CONFIG_PAX_RANDUSTACK
43114 + if (randomize_va_space)
43115 + bprm->p ^= random32() & ~PAGE_MASK;
43116 +#endif
43117 +
43118 return 0;
43119 err:
43120 up_write(&mm->mmap_sem);
43121 @@ -396,19 +426,7 @@ err:
43122 return err;
43123 }
43124
43125 -struct user_arg_ptr {
43126 -#ifdef CONFIG_COMPAT
43127 - bool is_compat;
43128 -#endif
43129 - union {
43130 - const char __user *const __user *native;
43131 -#ifdef CONFIG_COMPAT
43132 - compat_uptr_t __user *compat;
43133 -#endif
43134 - } ptr;
43135 -};
43136 -
43137 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43138 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43139 {
43140 const char __user *native;
43141
43142 @@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43143 compat_uptr_t compat;
43144
43145 if (get_user(compat, argv.ptr.compat + nr))
43146 - return ERR_PTR(-EFAULT);
43147 + return (const char __force_user *)ERR_PTR(-EFAULT);
43148
43149 return compat_ptr(compat);
43150 }
43151 #endif
43152
43153 if (get_user(native, argv.ptr.native + nr))
43154 - return ERR_PTR(-EFAULT);
43155 + return (const char __force_user *)ERR_PTR(-EFAULT);
43156
43157 return native;
43158 }
43159 @@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
43160 if (!p)
43161 break;
43162
43163 - if (IS_ERR(p))
43164 + if (IS_ERR((const char __force_kernel *)p))
43165 return -EFAULT;
43166
43167 if (i++ >= max)
43168 @@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43169
43170 ret = -EFAULT;
43171 str = get_user_arg_ptr(argv, argc);
43172 - if (IS_ERR(str))
43173 + if (IS_ERR((const char __force_kernel *)str))
43174 goto out;
43175
43176 len = strnlen_user(str, MAX_ARG_STRLEN);
43177 @@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43178 int r;
43179 mm_segment_t oldfs = get_fs();
43180 struct user_arg_ptr argv = {
43181 - .ptr.native = (const char __user *const __user *)__argv,
43182 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43183 };
43184
43185 set_fs(KERNEL_DS);
43186 @@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43187 unsigned long new_end = old_end - shift;
43188 struct mmu_gather tlb;
43189
43190 - BUG_ON(new_start > new_end);
43191 + if (new_start >= new_end || new_start < mmap_min_addr)
43192 + return -ENOMEM;
43193
43194 /*
43195 * ensure there are no vmas between where we want to go
43196 @@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43197 if (vma != find_vma(mm, new_start))
43198 return -EFAULT;
43199
43200 +#ifdef CONFIG_PAX_SEGMEXEC
43201 + BUG_ON(pax_find_mirror_vma(vma));
43202 +#endif
43203 +
43204 /*
43205 * cover the whole range: [new_start, old_end)
43206 */
43207 @@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43208 stack_top = arch_align_stack(stack_top);
43209 stack_top = PAGE_ALIGN(stack_top);
43210
43211 - if (unlikely(stack_top < mmap_min_addr) ||
43212 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43213 - return -ENOMEM;
43214 -
43215 stack_shift = vma->vm_end - stack_top;
43216
43217 bprm->p -= stack_shift;
43218 @@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43219 bprm->exec -= stack_shift;
43220
43221 down_write(&mm->mmap_sem);
43222 +
43223 + /* Move stack pages down in memory. */
43224 + if (stack_shift) {
43225 + ret = shift_arg_pages(vma, stack_shift);
43226 + if (ret)
43227 + goto out_unlock;
43228 + }
43229 +
43230 vm_flags = VM_STACK_FLAGS;
43231
43232 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43233 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43234 + vm_flags &= ~VM_EXEC;
43235 +
43236 +#ifdef CONFIG_PAX_MPROTECT
43237 + if (mm->pax_flags & MF_PAX_MPROTECT)
43238 + vm_flags &= ~VM_MAYEXEC;
43239 +#endif
43240 +
43241 + }
43242 +#endif
43243 +
43244 /*
43245 * Adjust stack execute permissions; explicitly enable for
43246 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43247 @@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43248 goto out_unlock;
43249 BUG_ON(prev != vma);
43250
43251 - /* Move stack pages down in memory. */
43252 - if (stack_shift) {
43253 - ret = shift_arg_pages(vma, stack_shift);
43254 - if (ret)
43255 - goto out_unlock;
43256 - }
43257 -
43258 /* mprotect_fixup is overkill to remove the temporary stack flags */
43259 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43260
43261 @@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
43262 old_fs = get_fs();
43263 set_fs(get_ds());
43264 /* The cast to a user pointer is valid due to the set_fs() */
43265 - result = vfs_read(file, (void __user *)addr, count, &pos);
43266 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43267 set_fs(old_fs);
43268 return result;
43269 }
43270 @@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
43271 perf_event_comm(tsk);
43272 }
43273
43274 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
43275 +{
43276 + int i, ch;
43277 +
43278 + /* Copies the binary name from after last slash */
43279 + for (i = 0; (ch = *(fn++)) != '\0';) {
43280 + if (ch == '/')
43281 + i = 0; /* overwrite what we wrote */
43282 + else
43283 + if (i < len - 1)
43284 + tcomm[i++] = ch;
43285 + }
43286 + tcomm[i] = '\0';
43287 +}
43288 +
43289 int flush_old_exec(struct linux_binprm * bprm)
43290 {
43291 int retval;
43292 @@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
43293
43294 set_mm_exe_file(bprm->mm, bprm->file);
43295
43296 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
43297 /*
43298 * Release all of the old mmap stuff
43299 */
43300 @@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
43301
43302 void setup_new_exec(struct linux_binprm * bprm)
43303 {
43304 - int i, ch;
43305 - const char *name;
43306 - char tcomm[sizeof(current->comm)];
43307 -
43308 arch_pick_mmap_layout(current->mm);
43309
43310 /* This is the point of no return */
43311 @@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
43312 else
43313 set_dumpable(current->mm, suid_dumpable);
43314
43315 - name = bprm->filename;
43316 -
43317 - /* Copies the binary name from after last slash */
43318 - for (i=0; (ch = *(name++)) != '\0';) {
43319 - if (ch == '/')
43320 - i = 0; /* overwrite what we wrote */
43321 - else
43322 - if (i < (sizeof(tcomm) - 1))
43323 - tcomm[i++] = ch;
43324 - }
43325 - tcomm[i] = '\0';
43326 - set_task_comm(current, tcomm);
43327 + set_task_comm(current, bprm->tcomm);
43328
43329 /* Set the new mm task size. We have to do that late because it may
43330 * depend on TIF_32BIT which is only updated in flush_thread() on
43331 @@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
43332 }
43333 rcu_read_unlock();
43334
43335 - if (p->fs->users > n_fs) {
43336 + if (atomic_read(&p->fs->users) > n_fs) {
43337 bprm->unsafe |= LSM_UNSAFE_SHARE;
43338 } else {
43339 res = -EAGAIN;
43340 @@ -1442,6 +1475,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43341
43342 EXPORT_SYMBOL(search_binary_handler);
43343
43344 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43345 +static DEFINE_PER_CPU(u64, exec_counter);
43346 +static int __init init_exec_counters(void)
43347 +{
43348 + unsigned int cpu;
43349 +
43350 + for_each_possible_cpu(cpu) {
43351 + per_cpu(exec_counter, cpu) = (u64)cpu;
43352 + }
43353 +
43354 + return 0;
43355 +}
43356 +early_initcall(init_exec_counters);
43357 +static inline void increment_exec_counter(void)
43358 +{
43359 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
43360 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43361 +}
43362 +#else
43363 +static inline void increment_exec_counter(void) {}
43364 +#endif
43365 +
43366 /*
43367 * sys_execve() executes a new program.
43368 */
43369 @@ -1450,6 +1505,11 @@ static int do_execve_common(const char *filename,
43370 struct user_arg_ptr envp,
43371 struct pt_regs *regs)
43372 {
43373 +#ifdef CONFIG_GRKERNSEC
43374 + struct file *old_exec_file;
43375 + struct acl_subject_label *old_acl;
43376 + struct rlimit old_rlim[RLIM_NLIMITS];
43377 +#endif
43378 struct linux_binprm *bprm;
43379 struct file *file;
43380 struct files_struct *displaced;
43381 @@ -1457,6 +1517,8 @@ static int do_execve_common(const char *filename,
43382 int retval;
43383 const struct cred *cred = current_cred();
43384
43385 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43386 +
43387 /*
43388 * We move the actual failure in case of RLIMIT_NPROC excess from
43389 * set*uid() to execve() because too many poorly written programs
43390 @@ -1497,12 +1559,27 @@ static int do_execve_common(const char *filename,
43391 if (IS_ERR(file))
43392 goto out_unmark;
43393
43394 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
43395 + retval = -EPERM;
43396 + goto out_file;
43397 + }
43398 +
43399 sched_exec();
43400
43401 bprm->file = file;
43402 bprm->filename = filename;
43403 bprm->interp = filename;
43404
43405 + if (gr_process_user_ban()) {
43406 + retval = -EPERM;
43407 + goto out_file;
43408 + }
43409 +
43410 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43411 + retval = -EACCES;
43412 + goto out_file;
43413 + }
43414 +
43415 retval = bprm_mm_init(bprm);
43416 if (retval)
43417 goto out_file;
43418 @@ -1519,24 +1596,65 @@ static int do_execve_common(const char *filename,
43419 if (retval < 0)
43420 goto out;
43421
43422 +#ifdef CONFIG_GRKERNSEC
43423 + old_acl = current->acl;
43424 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43425 + old_exec_file = current->exec_file;
43426 + get_file(file);
43427 + current->exec_file = file;
43428 +#endif
43429 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43430 + /* limit suid stack to 8MB
43431 + we saved the old limits above and will restore them if this exec fails
43432 + */
43433 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43434 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43435 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43436 +#endif
43437 +
43438 + if (!gr_tpe_allow(file)) {
43439 + retval = -EACCES;
43440 + goto out_fail;
43441 + }
43442 +
43443 + if (gr_check_crash_exec(file)) {
43444 + retval = -EACCES;
43445 + goto out_fail;
43446 + }
43447 +
43448 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43449 + bprm->unsafe);
43450 + if (retval < 0)
43451 + goto out_fail;
43452 +
43453 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43454 if (retval < 0)
43455 - goto out;
43456 + goto out_fail;
43457
43458 bprm->exec = bprm->p;
43459 retval = copy_strings(bprm->envc, envp, bprm);
43460 if (retval < 0)
43461 - goto out;
43462 + goto out_fail;
43463
43464 retval = copy_strings(bprm->argc, argv, bprm);
43465 if (retval < 0)
43466 - goto out;
43467 + goto out_fail;
43468 +
43469 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43470 +
43471 + gr_handle_exec_args(bprm, argv);
43472
43473 retval = search_binary_handler(bprm,regs);
43474 if (retval < 0)
43475 - goto out;
43476 + goto out_fail;
43477 +#ifdef CONFIG_GRKERNSEC
43478 + if (old_exec_file)
43479 + fput(old_exec_file);
43480 +#endif
43481
43482 /* execve succeeded */
43483 +
43484 + increment_exec_counter();
43485 current->fs->in_exec = 0;
43486 current->in_execve = 0;
43487 acct_update_integrals(current);
43488 @@ -1545,6 +1663,14 @@ static int do_execve_common(const char *filename,
43489 put_files_struct(displaced);
43490 return retval;
43491
43492 +out_fail:
43493 +#ifdef CONFIG_GRKERNSEC
43494 + current->acl = old_acl;
43495 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43496 + fput(current->exec_file);
43497 + current->exec_file = old_exec_file;
43498 +#endif
43499 +
43500 out:
43501 if (bprm->mm) {
43502 acct_arg_size(bprm, 0);
43503 @@ -1618,7 +1744,7 @@ static int expand_corename(struct core_name *cn)
43504 {
43505 char *old_corename = cn->corename;
43506
43507 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43508 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43509 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43510
43511 if (!cn->corename) {
43512 @@ -1715,7 +1841,7 @@ static int format_corename(struct core_name *cn, long signr)
43513 int pid_in_pattern = 0;
43514 int err = 0;
43515
43516 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43517 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43518 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43519 cn->used = 0;
43520
43521 @@ -1812,6 +1938,218 @@ out:
43522 return ispipe;
43523 }
43524
43525 +int pax_check_flags(unsigned long *flags)
43526 +{
43527 + int retval = 0;
43528 +
43529 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43530 + if (*flags & MF_PAX_SEGMEXEC)
43531 + {
43532 + *flags &= ~MF_PAX_SEGMEXEC;
43533 + retval = -EINVAL;
43534 + }
43535 +#endif
43536 +
43537 + if ((*flags & MF_PAX_PAGEEXEC)
43538 +
43539 +#ifdef CONFIG_PAX_PAGEEXEC
43540 + && (*flags & MF_PAX_SEGMEXEC)
43541 +#endif
43542 +
43543 + )
43544 + {
43545 + *flags &= ~MF_PAX_PAGEEXEC;
43546 + retval = -EINVAL;
43547 + }
43548 +
43549 + if ((*flags & MF_PAX_MPROTECT)
43550 +
43551 +#ifdef CONFIG_PAX_MPROTECT
43552 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43553 +#endif
43554 +
43555 + )
43556 + {
43557 + *flags &= ~MF_PAX_MPROTECT;
43558 + retval = -EINVAL;
43559 + }
43560 +
43561 + if ((*flags & MF_PAX_EMUTRAMP)
43562 +
43563 +#ifdef CONFIG_PAX_EMUTRAMP
43564 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43565 +#endif
43566 +
43567 + )
43568 + {
43569 + *flags &= ~MF_PAX_EMUTRAMP;
43570 + retval = -EINVAL;
43571 + }
43572 +
43573 + return retval;
43574 +}
43575 +
43576 +EXPORT_SYMBOL(pax_check_flags);
43577 +
43578 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43579 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43580 +{
43581 + struct task_struct *tsk = current;
43582 + struct mm_struct *mm = current->mm;
43583 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43584 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43585 + char *path_exec = NULL;
43586 + char *path_fault = NULL;
43587 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43588 +
43589 + if (buffer_exec && buffer_fault) {
43590 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43591 +
43592 + down_read(&mm->mmap_sem);
43593 + vma = mm->mmap;
43594 + while (vma && (!vma_exec || !vma_fault)) {
43595 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43596 + vma_exec = vma;
43597 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43598 + vma_fault = vma;
43599 + vma = vma->vm_next;
43600 + }
43601 + if (vma_exec) {
43602 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43603 + if (IS_ERR(path_exec))
43604 + path_exec = "<path too long>";
43605 + else {
43606 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43607 + if (path_exec) {
43608 + *path_exec = 0;
43609 + path_exec = buffer_exec;
43610 + } else
43611 + path_exec = "<path too long>";
43612 + }
43613 + }
43614 + if (vma_fault) {
43615 + start = vma_fault->vm_start;
43616 + end = vma_fault->vm_end;
43617 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43618 + if (vma_fault->vm_file) {
43619 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43620 + if (IS_ERR(path_fault))
43621 + path_fault = "<path too long>";
43622 + else {
43623 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43624 + if (path_fault) {
43625 + *path_fault = 0;
43626 + path_fault = buffer_fault;
43627 + } else
43628 + path_fault = "<path too long>";
43629 + }
43630 + } else
43631 + path_fault = "<anonymous mapping>";
43632 + }
43633 + up_read(&mm->mmap_sem);
43634 + }
43635 + if (tsk->signal->curr_ip)
43636 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43637 + else
43638 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43639 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43640 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43641 + task_uid(tsk), task_euid(tsk), pc, sp);
43642 + free_page((unsigned long)buffer_exec);
43643 + free_page((unsigned long)buffer_fault);
43644 + pax_report_insns(regs, pc, sp);
43645 + do_coredump(SIGKILL, SIGKILL, regs);
43646 +}
43647 +#endif
43648 +
43649 +#ifdef CONFIG_PAX_REFCOUNT
43650 +void pax_report_refcount_overflow(struct pt_regs *regs)
43651 +{
43652 + if (current->signal->curr_ip)
43653 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43654 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43655 + else
43656 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43657 + current->comm, task_pid_nr(current), current_uid(), current_euid());
43658 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43659 + show_regs(regs);
43660 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43661 +}
43662 +#endif
43663 +
43664 +#ifdef CONFIG_PAX_USERCOPY
43665 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43666 +int object_is_on_stack(const void *obj, unsigned long len)
43667 +{
43668 + const void * const stack = task_stack_page(current);
43669 + const void * const stackend = stack + THREAD_SIZE;
43670 +
43671 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43672 + const void *frame = NULL;
43673 + const void *oldframe;
43674 +#endif
43675 +
43676 + if (obj + len < obj)
43677 + return -1;
43678 +
43679 + if (obj + len <= stack || stackend <= obj)
43680 + return 0;
43681 +
43682 + if (obj < stack || stackend < obj + len)
43683 + return -1;
43684 +
43685 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43686 + oldframe = __builtin_frame_address(1);
43687 + if (oldframe)
43688 + frame = __builtin_frame_address(2);
43689 + /*
43690 + low ----------------------------------------------> high
43691 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
43692 + ^----------------^
43693 + allow copies only within here
43694 + */
43695 + while (stack <= frame && frame < stackend) {
43696 + /* if obj + len extends past the last frame, this
43697 + check won't pass and the next frame will be 0,
43698 + causing us to bail out and correctly report
43699 + the copy as invalid
43700 + */
43701 + if (obj + len <= frame)
43702 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43703 + oldframe = frame;
43704 + frame = *(const void * const *)frame;
43705 + }
43706 + return -1;
43707 +#else
43708 + return 1;
43709 +#endif
43710 +}
43711 +
43712 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
43713 +{
43714 + if (current->signal->curr_ip)
43715 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43716 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43717 + else
43718 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43719 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43720 + dump_stack();
43721 + gr_handle_kernel_exploit();
43722 + do_group_exit(SIGKILL);
43723 +}
43724 +#endif
43725 +
43726 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
43727 +void pax_track_stack(void)
43728 +{
43729 + unsigned long sp = (unsigned long)&sp;
43730 + if (sp < current_thread_info()->lowest_stack &&
43731 + sp > (unsigned long)task_stack_page(current))
43732 + current_thread_info()->lowest_stack = sp;
43733 +}
43734 +EXPORT_SYMBOL(pax_track_stack);
43735 +#endif
43736 +
43737 static int zap_process(struct task_struct *start, int exit_code)
43738 {
43739 struct task_struct *t;
43740 @@ -2023,17 +2361,17 @@ static void wait_for_dump_helpers(struct file *file)
43741 pipe = file->f_path.dentry->d_inode->i_pipe;
43742
43743 pipe_lock(pipe);
43744 - pipe->readers++;
43745 - pipe->writers--;
43746 + atomic_inc(&pipe->readers);
43747 + atomic_dec(&pipe->writers);
43748
43749 - while ((pipe->readers > 1) && (!signal_pending(current))) {
43750 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
43751 wake_up_interruptible_sync(&pipe->wait);
43752 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
43753 pipe_wait(pipe);
43754 }
43755
43756 - pipe->readers--;
43757 - pipe->writers++;
43758 + atomic_dec(&pipe->readers);
43759 + atomic_inc(&pipe->writers);
43760 pipe_unlock(pipe);
43761
43762 }
43763 @@ -2094,7 +2432,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43764 int retval = 0;
43765 int flag = 0;
43766 int ispipe;
43767 - static atomic_t core_dump_count = ATOMIC_INIT(0);
43768 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
43769 struct coredump_params cprm = {
43770 .signr = signr,
43771 .regs = regs,
43772 @@ -2109,6 +2447,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43773
43774 audit_core_dumps(signr);
43775
43776 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
43777 + gr_handle_brute_attach(current, cprm.mm_flags);
43778 +
43779 binfmt = mm->binfmt;
43780 if (!binfmt || !binfmt->core_dump)
43781 goto fail;
43782 @@ -2176,7 +2517,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43783 }
43784 cprm.limit = RLIM_INFINITY;
43785
43786 - dump_count = atomic_inc_return(&core_dump_count);
43787 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
43788 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
43789 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
43790 task_tgid_vnr(current), current->comm);
43791 @@ -2203,6 +2544,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43792 } else {
43793 struct inode *inode;
43794
43795 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
43796 +
43797 if (cprm.limit < binfmt->min_coredump)
43798 goto fail_unlock;
43799
43800 @@ -2246,7 +2589,7 @@ close_fail:
43801 filp_close(cprm.file, NULL);
43802 fail_dropcount:
43803 if (ispipe)
43804 - atomic_dec(&core_dump_count);
43805 + atomic_dec_unchecked(&core_dump_count);
43806 fail_unlock:
43807 kfree(cn.corename);
43808 fail_corename:
43809 @@ -2265,7 +2608,7 @@ fail:
43810 */
43811 int dump_write(struct file *file, const void *addr, int nr)
43812 {
43813 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
43814 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
43815 }
43816 EXPORT_SYMBOL(dump_write);
43817
43818 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
43819 index a8cbe1b..fed04cb 100644
43820 --- a/fs/ext2/balloc.c
43821 +++ b/fs/ext2/balloc.c
43822 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
43823
43824 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43825 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43826 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43827 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
43828 sbi->s_resuid != current_fsuid() &&
43829 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43830 return 0;
43831 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
43832 index a203892..4e64db5 100644
43833 --- a/fs/ext3/balloc.c
43834 +++ b/fs/ext3/balloc.c
43835 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
43836
43837 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43838 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43839 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43840 + if (free_blocks < root_blocks + 1 &&
43841 !use_reservation && sbi->s_resuid != current_fsuid() &&
43842 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43843 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
43844 + !capable_nolog(CAP_SYS_RESOURCE)) {
43845 return 0;
43846 }
43847 return 1;
43848 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
43849 index 12ccacd..a6035fce0 100644
43850 --- a/fs/ext4/balloc.c
43851 +++ b/fs/ext4/balloc.c
43852 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
43853 /* Hm, nope. Are (enough) root reserved clusters available? */
43854 if (sbi->s_resuid == current_fsuid() ||
43855 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
43856 - capable(CAP_SYS_RESOURCE) ||
43857 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
43858 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
43859 + capable_nolog(CAP_SYS_RESOURCE)) {
43860
43861 if (free_clusters >= (nclusters + dirty_clusters))
43862 return 1;
43863 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
43864 index 5b0e26a..0aa002d 100644
43865 --- a/fs/ext4/ext4.h
43866 +++ b/fs/ext4/ext4.h
43867 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
43868 unsigned long s_mb_last_start;
43869
43870 /* stats for buddy allocator */
43871 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
43872 - atomic_t s_bal_success; /* we found long enough chunks */
43873 - atomic_t s_bal_allocated; /* in blocks */
43874 - atomic_t s_bal_ex_scanned; /* total extents scanned */
43875 - atomic_t s_bal_goals; /* goal hits */
43876 - atomic_t s_bal_breaks; /* too long searches */
43877 - atomic_t s_bal_2orders; /* 2^order hits */
43878 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
43879 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
43880 + atomic_unchecked_t s_bal_allocated; /* in blocks */
43881 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
43882 + atomic_unchecked_t s_bal_goals; /* goal hits */
43883 + atomic_unchecked_t s_bal_breaks; /* too long searches */
43884 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
43885 spinlock_t s_bal_lock;
43886 unsigned long s_mb_buddies_generated;
43887 unsigned long long s_mb_generation_time;
43888 - atomic_t s_mb_lost_chunks;
43889 - atomic_t s_mb_preallocated;
43890 - atomic_t s_mb_discarded;
43891 + atomic_unchecked_t s_mb_lost_chunks;
43892 + atomic_unchecked_t s_mb_preallocated;
43893 + atomic_unchecked_t s_mb_discarded;
43894 atomic_t s_lock_busy;
43895
43896 /* locality groups */
43897 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
43898 index e2d8be8..c7f0ce9 100644
43899 --- a/fs/ext4/mballoc.c
43900 +++ b/fs/ext4/mballoc.c
43901 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
43902 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
43903
43904 if (EXT4_SB(sb)->s_mb_stats)
43905 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
43906 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
43907
43908 break;
43909 }
43910 @@ -2088,7 +2088,7 @@ repeat:
43911 ac->ac_status = AC_STATUS_CONTINUE;
43912 ac->ac_flags |= EXT4_MB_HINT_FIRST;
43913 cr = 3;
43914 - atomic_inc(&sbi->s_mb_lost_chunks);
43915 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
43916 goto repeat;
43917 }
43918 }
43919 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
43920 if (sbi->s_mb_stats) {
43921 ext4_msg(sb, KERN_INFO,
43922 "mballoc: %u blocks %u reqs (%u success)",
43923 - atomic_read(&sbi->s_bal_allocated),
43924 - atomic_read(&sbi->s_bal_reqs),
43925 - atomic_read(&sbi->s_bal_success));
43926 + atomic_read_unchecked(&sbi->s_bal_allocated),
43927 + atomic_read_unchecked(&sbi->s_bal_reqs),
43928 + atomic_read_unchecked(&sbi->s_bal_success));
43929 ext4_msg(sb, KERN_INFO,
43930 "mballoc: %u extents scanned, %u goal hits, "
43931 "%u 2^N hits, %u breaks, %u lost",
43932 - atomic_read(&sbi->s_bal_ex_scanned),
43933 - atomic_read(&sbi->s_bal_goals),
43934 - atomic_read(&sbi->s_bal_2orders),
43935 - atomic_read(&sbi->s_bal_breaks),
43936 - atomic_read(&sbi->s_mb_lost_chunks));
43937 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
43938 + atomic_read_unchecked(&sbi->s_bal_goals),
43939 + atomic_read_unchecked(&sbi->s_bal_2orders),
43940 + atomic_read_unchecked(&sbi->s_bal_breaks),
43941 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
43942 ext4_msg(sb, KERN_INFO,
43943 "mballoc: %lu generated and it took %Lu",
43944 sbi->s_mb_buddies_generated,
43945 sbi->s_mb_generation_time);
43946 ext4_msg(sb, KERN_INFO,
43947 "mballoc: %u preallocated, %u discarded",
43948 - atomic_read(&sbi->s_mb_preallocated),
43949 - atomic_read(&sbi->s_mb_discarded));
43950 + atomic_read_unchecked(&sbi->s_mb_preallocated),
43951 + atomic_read_unchecked(&sbi->s_mb_discarded));
43952 }
43953
43954 free_percpu(sbi->s_locality_groups);
43955 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
43956 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
43957
43958 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
43959 - atomic_inc(&sbi->s_bal_reqs);
43960 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43961 + atomic_inc_unchecked(&sbi->s_bal_reqs);
43962 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
43963 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
43964 - atomic_inc(&sbi->s_bal_success);
43965 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
43966 + atomic_inc_unchecked(&sbi->s_bal_success);
43967 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
43968 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
43969 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
43970 - atomic_inc(&sbi->s_bal_goals);
43971 + atomic_inc_unchecked(&sbi->s_bal_goals);
43972 if (ac->ac_found > sbi->s_mb_max_to_scan)
43973 - atomic_inc(&sbi->s_bal_breaks);
43974 + atomic_inc_unchecked(&sbi->s_bal_breaks);
43975 }
43976
43977 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
43978 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
43979 trace_ext4_mb_new_inode_pa(ac, pa);
43980
43981 ext4_mb_use_inode_pa(ac, pa);
43982 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
43983 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
43984
43985 ei = EXT4_I(ac->ac_inode);
43986 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43987 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
43988 trace_ext4_mb_new_group_pa(ac, pa);
43989
43990 ext4_mb_use_group_pa(ac, pa);
43991 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43992 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
43993
43994 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
43995 lg = ac->ac_lg;
43996 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
43997 * from the bitmap and continue.
43998 */
43999 }
44000 - atomic_add(free, &sbi->s_mb_discarded);
44001 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44002
44003 return err;
44004 }
44005 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44006 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44007 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44008 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44009 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44010 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44011 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44012
44013 return 0;
44014 diff --git a/fs/fcntl.c b/fs/fcntl.c
44015 index 22764c7..86372c9 100644
44016 --- a/fs/fcntl.c
44017 +++ b/fs/fcntl.c
44018 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44019 if (err)
44020 return err;
44021
44022 + if (gr_handle_chroot_fowner(pid, type))
44023 + return -ENOENT;
44024 + if (gr_check_protected_task_fowner(pid, type))
44025 + return -EACCES;
44026 +
44027 f_modown(filp, pid, type, force);
44028 return 0;
44029 }
44030 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44031
44032 static int f_setown_ex(struct file *filp, unsigned long arg)
44033 {
44034 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44035 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44036 struct f_owner_ex owner;
44037 struct pid *pid;
44038 int type;
44039 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44040
44041 static int f_getown_ex(struct file *filp, unsigned long arg)
44042 {
44043 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44044 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44045 struct f_owner_ex owner;
44046 int ret = 0;
44047
44048 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44049 switch (cmd) {
44050 case F_DUPFD:
44051 case F_DUPFD_CLOEXEC:
44052 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44053 if (arg >= rlimit(RLIMIT_NOFILE))
44054 break;
44055 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44056 diff --git a/fs/fifo.c b/fs/fifo.c
44057 index b1a524d..4ee270e 100644
44058 --- a/fs/fifo.c
44059 +++ b/fs/fifo.c
44060 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44061 */
44062 filp->f_op = &read_pipefifo_fops;
44063 pipe->r_counter++;
44064 - if (pipe->readers++ == 0)
44065 + if (atomic_inc_return(&pipe->readers) == 1)
44066 wake_up_partner(inode);
44067
44068 - if (!pipe->writers) {
44069 + if (!atomic_read(&pipe->writers)) {
44070 if ((filp->f_flags & O_NONBLOCK)) {
44071 /* suppress POLLHUP until we have
44072 * seen a writer */
44073 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44074 * errno=ENXIO when there is no process reading the FIFO.
44075 */
44076 ret = -ENXIO;
44077 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44078 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44079 goto err;
44080
44081 filp->f_op = &write_pipefifo_fops;
44082 pipe->w_counter++;
44083 - if (!pipe->writers++)
44084 + if (atomic_inc_return(&pipe->writers) == 1)
44085 wake_up_partner(inode);
44086
44087 - if (!pipe->readers) {
44088 + if (!atomic_read(&pipe->readers)) {
44089 wait_for_partner(inode, &pipe->r_counter);
44090 if (signal_pending(current))
44091 goto err_wr;
44092 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44093 */
44094 filp->f_op = &rdwr_pipefifo_fops;
44095
44096 - pipe->readers++;
44097 - pipe->writers++;
44098 + atomic_inc(&pipe->readers);
44099 + atomic_inc(&pipe->writers);
44100 pipe->r_counter++;
44101 pipe->w_counter++;
44102 - if (pipe->readers == 1 || pipe->writers == 1)
44103 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44104 wake_up_partner(inode);
44105 break;
44106
44107 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44108 return 0;
44109
44110 err_rd:
44111 - if (!--pipe->readers)
44112 + if (atomic_dec_and_test(&pipe->readers))
44113 wake_up_interruptible(&pipe->wait);
44114 ret = -ERESTARTSYS;
44115 goto err;
44116
44117 err_wr:
44118 - if (!--pipe->writers)
44119 + if (atomic_dec_and_test(&pipe->writers))
44120 wake_up_interruptible(&pipe->wait);
44121 ret = -ERESTARTSYS;
44122 goto err;
44123
44124 err:
44125 - if (!pipe->readers && !pipe->writers)
44126 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44127 free_pipe_info(inode);
44128
44129 err_nocleanup:
44130 diff --git a/fs/file.c b/fs/file.c
44131 index 4c6992d..104cdea 100644
44132 --- a/fs/file.c
44133 +++ b/fs/file.c
44134 @@ -15,6 +15,7 @@
44135 #include <linux/slab.h>
44136 #include <linux/vmalloc.h>
44137 #include <linux/file.h>
44138 +#include <linux/security.h>
44139 #include <linux/fdtable.h>
44140 #include <linux/bitops.h>
44141 #include <linux/interrupt.h>
44142 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44143 * N.B. For clone tasks sharing a files structure, this test
44144 * will limit the total number of files that can be opened.
44145 */
44146 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44147 if (nr >= rlimit(RLIMIT_NOFILE))
44148 return -EMFILE;
44149
44150 diff --git a/fs/filesystems.c b/fs/filesystems.c
44151 index 0845f84..7b4ebef 100644
44152 --- a/fs/filesystems.c
44153 +++ b/fs/filesystems.c
44154 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
44155 int len = dot ? dot - name : strlen(name);
44156
44157 fs = __get_fs_type(name, len);
44158 +
44159 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44160 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44161 +#else
44162 if (!fs && (request_module("%.*s", len, name) == 0))
44163 +#endif
44164 fs = __get_fs_type(name, len);
44165
44166 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44167 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44168 index 78b519c..a8b4979 100644
44169 --- a/fs/fs_struct.c
44170 +++ b/fs/fs_struct.c
44171 @@ -4,6 +4,7 @@
44172 #include <linux/path.h>
44173 #include <linux/slab.h>
44174 #include <linux/fs_struct.h>
44175 +#include <linux/grsecurity.h>
44176 #include "internal.h"
44177
44178 static inline void path_get_longterm(struct path *path)
44179 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44180 old_root = fs->root;
44181 fs->root = *path;
44182 path_get_longterm(path);
44183 + gr_set_chroot_entries(current, path);
44184 write_seqcount_end(&fs->seq);
44185 spin_unlock(&fs->lock);
44186 if (old_root.dentry)
44187 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44188 && fs->root.mnt == old_root->mnt) {
44189 path_get_longterm(new_root);
44190 fs->root = *new_root;
44191 + gr_set_chroot_entries(p, new_root);
44192 count++;
44193 }
44194 if (fs->pwd.dentry == old_root->dentry
44195 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44196 spin_lock(&fs->lock);
44197 write_seqcount_begin(&fs->seq);
44198 tsk->fs = NULL;
44199 - kill = !--fs->users;
44200 + gr_clear_chroot_entries(tsk);
44201 + kill = !atomic_dec_return(&fs->users);
44202 write_seqcount_end(&fs->seq);
44203 spin_unlock(&fs->lock);
44204 task_unlock(tsk);
44205 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44206 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44207 /* We don't need to lock fs - think why ;-) */
44208 if (fs) {
44209 - fs->users = 1;
44210 + atomic_set(&fs->users, 1);
44211 fs->in_exec = 0;
44212 spin_lock_init(&fs->lock);
44213 seqcount_init(&fs->seq);
44214 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44215 spin_lock(&old->lock);
44216 fs->root = old->root;
44217 path_get_longterm(&fs->root);
44218 + /* instead of calling gr_set_chroot_entries here,
44219 + we call it from every caller of this function
44220 + */
44221 fs->pwd = old->pwd;
44222 path_get_longterm(&fs->pwd);
44223 spin_unlock(&old->lock);
44224 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44225
44226 task_lock(current);
44227 spin_lock(&fs->lock);
44228 - kill = !--fs->users;
44229 + kill = !atomic_dec_return(&fs->users);
44230 current->fs = new_fs;
44231 + gr_set_chroot_entries(current, &new_fs->root);
44232 spin_unlock(&fs->lock);
44233 task_unlock(current);
44234
44235 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44236
44237 int current_umask(void)
44238 {
44239 - return current->fs->umask;
44240 + return current->fs->umask | gr_acl_umask();
44241 }
44242 EXPORT_SYMBOL(current_umask);
44243
44244 /* to be mentioned only in INIT_TASK */
44245 struct fs_struct init_fs = {
44246 - .users = 1,
44247 + .users = ATOMIC_INIT(1),
44248 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44249 .seq = SEQCNT_ZERO,
44250 .umask = 0022,
44251 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44252 task_lock(current);
44253
44254 spin_lock(&init_fs.lock);
44255 - init_fs.users++;
44256 + atomic_inc(&init_fs.users);
44257 spin_unlock(&init_fs.lock);
44258
44259 spin_lock(&fs->lock);
44260 current->fs = &init_fs;
44261 - kill = !--fs->users;
44262 + gr_set_chroot_entries(current, &current->fs->root);
44263 + kill = !atomic_dec_return(&fs->users);
44264 spin_unlock(&fs->lock);
44265
44266 task_unlock(current);
44267 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44268 index 9905350..02eaec4 100644
44269 --- a/fs/fscache/cookie.c
44270 +++ b/fs/fscache/cookie.c
44271 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44272 parent ? (char *) parent->def->name : "<no-parent>",
44273 def->name, netfs_data);
44274
44275 - fscache_stat(&fscache_n_acquires);
44276 + fscache_stat_unchecked(&fscache_n_acquires);
44277
44278 /* if there's no parent cookie, then we don't create one here either */
44279 if (!parent) {
44280 - fscache_stat(&fscache_n_acquires_null);
44281 + fscache_stat_unchecked(&fscache_n_acquires_null);
44282 _leave(" [no parent]");
44283 return NULL;
44284 }
44285 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44286 /* allocate and initialise a cookie */
44287 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44288 if (!cookie) {
44289 - fscache_stat(&fscache_n_acquires_oom);
44290 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44291 _leave(" [ENOMEM]");
44292 return NULL;
44293 }
44294 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44295
44296 switch (cookie->def->type) {
44297 case FSCACHE_COOKIE_TYPE_INDEX:
44298 - fscache_stat(&fscache_n_cookie_index);
44299 + fscache_stat_unchecked(&fscache_n_cookie_index);
44300 break;
44301 case FSCACHE_COOKIE_TYPE_DATAFILE:
44302 - fscache_stat(&fscache_n_cookie_data);
44303 + fscache_stat_unchecked(&fscache_n_cookie_data);
44304 break;
44305 default:
44306 - fscache_stat(&fscache_n_cookie_special);
44307 + fscache_stat_unchecked(&fscache_n_cookie_special);
44308 break;
44309 }
44310
44311 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44312 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44313 atomic_dec(&parent->n_children);
44314 __fscache_cookie_put(cookie);
44315 - fscache_stat(&fscache_n_acquires_nobufs);
44316 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44317 _leave(" = NULL");
44318 return NULL;
44319 }
44320 }
44321
44322 - fscache_stat(&fscache_n_acquires_ok);
44323 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44324 _leave(" = %p", cookie);
44325 return cookie;
44326 }
44327 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44328 cache = fscache_select_cache_for_object(cookie->parent);
44329 if (!cache) {
44330 up_read(&fscache_addremove_sem);
44331 - fscache_stat(&fscache_n_acquires_no_cache);
44332 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44333 _leave(" = -ENOMEDIUM [no cache]");
44334 return -ENOMEDIUM;
44335 }
44336 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44337 object = cache->ops->alloc_object(cache, cookie);
44338 fscache_stat_d(&fscache_n_cop_alloc_object);
44339 if (IS_ERR(object)) {
44340 - fscache_stat(&fscache_n_object_no_alloc);
44341 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44342 ret = PTR_ERR(object);
44343 goto error;
44344 }
44345
44346 - fscache_stat(&fscache_n_object_alloc);
44347 + fscache_stat_unchecked(&fscache_n_object_alloc);
44348
44349 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44350
44351 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44352 struct fscache_object *object;
44353 struct hlist_node *_p;
44354
44355 - fscache_stat(&fscache_n_updates);
44356 + fscache_stat_unchecked(&fscache_n_updates);
44357
44358 if (!cookie) {
44359 - fscache_stat(&fscache_n_updates_null);
44360 + fscache_stat_unchecked(&fscache_n_updates_null);
44361 _leave(" [no cookie]");
44362 return;
44363 }
44364 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44365 struct fscache_object *object;
44366 unsigned long event;
44367
44368 - fscache_stat(&fscache_n_relinquishes);
44369 + fscache_stat_unchecked(&fscache_n_relinquishes);
44370 if (retire)
44371 - fscache_stat(&fscache_n_relinquishes_retire);
44372 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44373
44374 if (!cookie) {
44375 - fscache_stat(&fscache_n_relinquishes_null);
44376 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44377 _leave(" [no cookie]");
44378 return;
44379 }
44380 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44381
44382 /* wait for the cookie to finish being instantiated (or to fail) */
44383 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44384 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44385 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44386 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44387 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44388 }
44389 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44390 index f6aad48..88dcf26 100644
44391 --- a/fs/fscache/internal.h
44392 +++ b/fs/fscache/internal.h
44393 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44394 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44395 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44396
44397 -extern atomic_t fscache_n_op_pend;
44398 -extern atomic_t fscache_n_op_run;
44399 -extern atomic_t fscache_n_op_enqueue;
44400 -extern atomic_t fscache_n_op_deferred_release;
44401 -extern atomic_t fscache_n_op_release;
44402 -extern atomic_t fscache_n_op_gc;
44403 -extern atomic_t fscache_n_op_cancelled;
44404 -extern atomic_t fscache_n_op_rejected;
44405 +extern atomic_unchecked_t fscache_n_op_pend;
44406 +extern atomic_unchecked_t fscache_n_op_run;
44407 +extern atomic_unchecked_t fscache_n_op_enqueue;
44408 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44409 +extern atomic_unchecked_t fscache_n_op_release;
44410 +extern atomic_unchecked_t fscache_n_op_gc;
44411 +extern atomic_unchecked_t fscache_n_op_cancelled;
44412 +extern atomic_unchecked_t fscache_n_op_rejected;
44413
44414 -extern atomic_t fscache_n_attr_changed;
44415 -extern atomic_t fscache_n_attr_changed_ok;
44416 -extern atomic_t fscache_n_attr_changed_nobufs;
44417 -extern atomic_t fscache_n_attr_changed_nomem;
44418 -extern atomic_t fscache_n_attr_changed_calls;
44419 +extern atomic_unchecked_t fscache_n_attr_changed;
44420 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44421 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44422 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44423 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44424
44425 -extern atomic_t fscache_n_allocs;
44426 -extern atomic_t fscache_n_allocs_ok;
44427 -extern atomic_t fscache_n_allocs_wait;
44428 -extern atomic_t fscache_n_allocs_nobufs;
44429 -extern atomic_t fscache_n_allocs_intr;
44430 -extern atomic_t fscache_n_allocs_object_dead;
44431 -extern atomic_t fscache_n_alloc_ops;
44432 -extern atomic_t fscache_n_alloc_op_waits;
44433 +extern atomic_unchecked_t fscache_n_allocs;
44434 +extern atomic_unchecked_t fscache_n_allocs_ok;
44435 +extern atomic_unchecked_t fscache_n_allocs_wait;
44436 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44437 +extern atomic_unchecked_t fscache_n_allocs_intr;
44438 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44439 +extern atomic_unchecked_t fscache_n_alloc_ops;
44440 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44441
44442 -extern atomic_t fscache_n_retrievals;
44443 -extern atomic_t fscache_n_retrievals_ok;
44444 -extern atomic_t fscache_n_retrievals_wait;
44445 -extern atomic_t fscache_n_retrievals_nodata;
44446 -extern atomic_t fscache_n_retrievals_nobufs;
44447 -extern atomic_t fscache_n_retrievals_intr;
44448 -extern atomic_t fscache_n_retrievals_nomem;
44449 -extern atomic_t fscache_n_retrievals_object_dead;
44450 -extern atomic_t fscache_n_retrieval_ops;
44451 -extern atomic_t fscache_n_retrieval_op_waits;
44452 +extern atomic_unchecked_t fscache_n_retrievals;
44453 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44454 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44455 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44456 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44457 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44458 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44459 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44460 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44461 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44462
44463 -extern atomic_t fscache_n_stores;
44464 -extern atomic_t fscache_n_stores_ok;
44465 -extern atomic_t fscache_n_stores_again;
44466 -extern atomic_t fscache_n_stores_nobufs;
44467 -extern atomic_t fscache_n_stores_oom;
44468 -extern atomic_t fscache_n_store_ops;
44469 -extern atomic_t fscache_n_store_calls;
44470 -extern atomic_t fscache_n_store_pages;
44471 -extern atomic_t fscache_n_store_radix_deletes;
44472 -extern atomic_t fscache_n_store_pages_over_limit;
44473 +extern atomic_unchecked_t fscache_n_stores;
44474 +extern atomic_unchecked_t fscache_n_stores_ok;
44475 +extern atomic_unchecked_t fscache_n_stores_again;
44476 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44477 +extern atomic_unchecked_t fscache_n_stores_oom;
44478 +extern atomic_unchecked_t fscache_n_store_ops;
44479 +extern atomic_unchecked_t fscache_n_store_calls;
44480 +extern atomic_unchecked_t fscache_n_store_pages;
44481 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44482 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44483
44484 -extern atomic_t fscache_n_store_vmscan_not_storing;
44485 -extern atomic_t fscache_n_store_vmscan_gone;
44486 -extern atomic_t fscache_n_store_vmscan_busy;
44487 -extern atomic_t fscache_n_store_vmscan_cancelled;
44488 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44489 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44490 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44491 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44492
44493 -extern atomic_t fscache_n_marks;
44494 -extern atomic_t fscache_n_uncaches;
44495 +extern atomic_unchecked_t fscache_n_marks;
44496 +extern atomic_unchecked_t fscache_n_uncaches;
44497
44498 -extern atomic_t fscache_n_acquires;
44499 -extern atomic_t fscache_n_acquires_null;
44500 -extern atomic_t fscache_n_acquires_no_cache;
44501 -extern atomic_t fscache_n_acquires_ok;
44502 -extern atomic_t fscache_n_acquires_nobufs;
44503 -extern atomic_t fscache_n_acquires_oom;
44504 +extern atomic_unchecked_t fscache_n_acquires;
44505 +extern atomic_unchecked_t fscache_n_acquires_null;
44506 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44507 +extern atomic_unchecked_t fscache_n_acquires_ok;
44508 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44509 +extern atomic_unchecked_t fscache_n_acquires_oom;
44510
44511 -extern atomic_t fscache_n_updates;
44512 -extern atomic_t fscache_n_updates_null;
44513 -extern atomic_t fscache_n_updates_run;
44514 +extern atomic_unchecked_t fscache_n_updates;
44515 +extern atomic_unchecked_t fscache_n_updates_null;
44516 +extern atomic_unchecked_t fscache_n_updates_run;
44517
44518 -extern atomic_t fscache_n_relinquishes;
44519 -extern atomic_t fscache_n_relinquishes_null;
44520 -extern atomic_t fscache_n_relinquishes_waitcrt;
44521 -extern atomic_t fscache_n_relinquishes_retire;
44522 +extern atomic_unchecked_t fscache_n_relinquishes;
44523 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44524 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44525 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44526
44527 -extern atomic_t fscache_n_cookie_index;
44528 -extern atomic_t fscache_n_cookie_data;
44529 -extern atomic_t fscache_n_cookie_special;
44530 +extern atomic_unchecked_t fscache_n_cookie_index;
44531 +extern atomic_unchecked_t fscache_n_cookie_data;
44532 +extern atomic_unchecked_t fscache_n_cookie_special;
44533
44534 -extern atomic_t fscache_n_object_alloc;
44535 -extern atomic_t fscache_n_object_no_alloc;
44536 -extern atomic_t fscache_n_object_lookups;
44537 -extern atomic_t fscache_n_object_lookups_negative;
44538 -extern atomic_t fscache_n_object_lookups_positive;
44539 -extern atomic_t fscache_n_object_lookups_timed_out;
44540 -extern atomic_t fscache_n_object_created;
44541 -extern atomic_t fscache_n_object_avail;
44542 -extern atomic_t fscache_n_object_dead;
44543 +extern atomic_unchecked_t fscache_n_object_alloc;
44544 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44545 +extern atomic_unchecked_t fscache_n_object_lookups;
44546 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44547 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44548 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44549 +extern atomic_unchecked_t fscache_n_object_created;
44550 +extern atomic_unchecked_t fscache_n_object_avail;
44551 +extern atomic_unchecked_t fscache_n_object_dead;
44552
44553 -extern atomic_t fscache_n_checkaux_none;
44554 -extern atomic_t fscache_n_checkaux_okay;
44555 -extern atomic_t fscache_n_checkaux_update;
44556 -extern atomic_t fscache_n_checkaux_obsolete;
44557 +extern atomic_unchecked_t fscache_n_checkaux_none;
44558 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44559 +extern atomic_unchecked_t fscache_n_checkaux_update;
44560 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44561
44562 extern atomic_t fscache_n_cop_alloc_object;
44563 extern atomic_t fscache_n_cop_lookup_object;
44564 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44565 atomic_inc(stat);
44566 }
44567
44568 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44569 +{
44570 + atomic_inc_unchecked(stat);
44571 +}
44572 +
44573 static inline void fscache_stat_d(atomic_t *stat)
44574 {
44575 atomic_dec(stat);
44576 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44577
44578 #define __fscache_stat(stat) (NULL)
44579 #define fscache_stat(stat) do {} while (0)
44580 +#define fscache_stat_unchecked(stat) do {} while (0)
44581 #define fscache_stat_d(stat) do {} while (0)
44582 #endif
44583
44584 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44585 index b6b897c..0ffff9c 100644
44586 --- a/fs/fscache/object.c
44587 +++ b/fs/fscache/object.c
44588 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44589 /* update the object metadata on disk */
44590 case FSCACHE_OBJECT_UPDATING:
44591 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44592 - fscache_stat(&fscache_n_updates_run);
44593 + fscache_stat_unchecked(&fscache_n_updates_run);
44594 fscache_stat(&fscache_n_cop_update_object);
44595 object->cache->ops->update_object(object);
44596 fscache_stat_d(&fscache_n_cop_update_object);
44597 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44598 spin_lock(&object->lock);
44599 object->state = FSCACHE_OBJECT_DEAD;
44600 spin_unlock(&object->lock);
44601 - fscache_stat(&fscache_n_object_dead);
44602 + fscache_stat_unchecked(&fscache_n_object_dead);
44603 goto terminal_transit;
44604
44605 /* handle the parent cache of this object being withdrawn from
44606 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44607 spin_lock(&object->lock);
44608 object->state = FSCACHE_OBJECT_DEAD;
44609 spin_unlock(&object->lock);
44610 - fscache_stat(&fscache_n_object_dead);
44611 + fscache_stat_unchecked(&fscache_n_object_dead);
44612 goto terminal_transit;
44613
44614 /* complain about the object being woken up once it is
44615 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44616 parent->cookie->def->name, cookie->def->name,
44617 object->cache->tag->name);
44618
44619 - fscache_stat(&fscache_n_object_lookups);
44620 + fscache_stat_unchecked(&fscache_n_object_lookups);
44621 fscache_stat(&fscache_n_cop_lookup_object);
44622 ret = object->cache->ops->lookup_object(object);
44623 fscache_stat_d(&fscache_n_cop_lookup_object);
44624 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44625 if (ret == -ETIMEDOUT) {
44626 /* probably stuck behind another object, so move this one to
44627 * the back of the queue */
44628 - fscache_stat(&fscache_n_object_lookups_timed_out);
44629 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44630 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44631 }
44632
44633 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44634
44635 spin_lock(&object->lock);
44636 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44637 - fscache_stat(&fscache_n_object_lookups_negative);
44638 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44639
44640 /* transit here to allow write requests to begin stacking up
44641 * and read requests to begin returning ENODATA */
44642 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44643 * result, in which case there may be data available */
44644 spin_lock(&object->lock);
44645 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44646 - fscache_stat(&fscache_n_object_lookups_positive);
44647 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44648
44649 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44650
44651 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44652 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44653 } else {
44654 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44655 - fscache_stat(&fscache_n_object_created);
44656 + fscache_stat_unchecked(&fscache_n_object_created);
44657
44658 object->state = FSCACHE_OBJECT_AVAILABLE;
44659 spin_unlock(&object->lock);
44660 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44661 fscache_enqueue_dependents(object);
44662
44663 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44664 - fscache_stat(&fscache_n_object_avail);
44665 + fscache_stat_unchecked(&fscache_n_object_avail);
44666
44667 _leave("");
44668 }
44669 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44670 enum fscache_checkaux result;
44671
44672 if (!object->cookie->def->check_aux) {
44673 - fscache_stat(&fscache_n_checkaux_none);
44674 + fscache_stat_unchecked(&fscache_n_checkaux_none);
44675 return FSCACHE_CHECKAUX_OKAY;
44676 }
44677
44678 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44679 switch (result) {
44680 /* entry okay as is */
44681 case FSCACHE_CHECKAUX_OKAY:
44682 - fscache_stat(&fscache_n_checkaux_okay);
44683 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
44684 break;
44685
44686 /* entry requires update */
44687 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
44688 - fscache_stat(&fscache_n_checkaux_update);
44689 + fscache_stat_unchecked(&fscache_n_checkaux_update);
44690 break;
44691
44692 /* entry requires deletion */
44693 case FSCACHE_CHECKAUX_OBSOLETE:
44694 - fscache_stat(&fscache_n_checkaux_obsolete);
44695 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
44696 break;
44697
44698 default:
44699 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
44700 index 30afdfa..2256596 100644
44701 --- a/fs/fscache/operation.c
44702 +++ b/fs/fscache/operation.c
44703 @@ -17,7 +17,7 @@
44704 #include <linux/slab.h>
44705 #include "internal.h"
44706
44707 -atomic_t fscache_op_debug_id;
44708 +atomic_unchecked_t fscache_op_debug_id;
44709 EXPORT_SYMBOL(fscache_op_debug_id);
44710
44711 /**
44712 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
44713 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
44714 ASSERTCMP(atomic_read(&op->usage), >, 0);
44715
44716 - fscache_stat(&fscache_n_op_enqueue);
44717 + fscache_stat_unchecked(&fscache_n_op_enqueue);
44718 switch (op->flags & FSCACHE_OP_TYPE) {
44719 case FSCACHE_OP_ASYNC:
44720 _debug("queue async");
44721 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
44722 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
44723 if (op->processor)
44724 fscache_enqueue_operation(op);
44725 - fscache_stat(&fscache_n_op_run);
44726 + fscache_stat_unchecked(&fscache_n_op_run);
44727 }
44728
44729 /*
44730 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44731 if (object->n_ops > 1) {
44732 atomic_inc(&op->usage);
44733 list_add_tail(&op->pend_link, &object->pending_ops);
44734 - fscache_stat(&fscache_n_op_pend);
44735 + fscache_stat_unchecked(&fscache_n_op_pend);
44736 } else if (!list_empty(&object->pending_ops)) {
44737 atomic_inc(&op->usage);
44738 list_add_tail(&op->pend_link, &object->pending_ops);
44739 - fscache_stat(&fscache_n_op_pend);
44740 + fscache_stat_unchecked(&fscache_n_op_pend);
44741 fscache_start_operations(object);
44742 } else {
44743 ASSERTCMP(object->n_in_progress, ==, 0);
44744 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44745 object->n_exclusive++; /* reads and writes must wait */
44746 atomic_inc(&op->usage);
44747 list_add_tail(&op->pend_link, &object->pending_ops);
44748 - fscache_stat(&fscache_n_op_pend);
44749 + fscache_stat_unchecked(&fscache_n_op_pend);
44750 ret = 0;
44751 } else {
44752 /* not allowed to submit ops in any other state */
44753 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
44754 if (object->n_exclusive > 0) {
44755 atomic_inc(&op->usage);
44756 list_add_tail(&op->pend_link, &object->pending_ops);
44757 - fscache_stat(&fscache_n_op_pend);
44758 + fscache_stat_unchecked(&fscache_n_op_pend);
44759 } else if (!list_empty(&object->pending_ops)) {
44760 atomic_inc(&op->usage);
44761 list_add_tail(&op->pend_link, &object->pending_ops);
44762 - fscache_stat(&fscache_n_op_pend);
44763 + fscache_stat_unchecked(&fscache_n_op_pend);
44764 fscache_start_operations(object);
44765 } else {
44766 ASSERTCMP(object->n_exclusive, ==, 0);
44767 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
44768 object->n_ops++;
44769 atomic_inc(&op->usage);
44770 list_add_tail(&op->pend_link, &object->pending_ops);
44771 - fscache_stat(&fscache_n_op_pend);
44772 + fscache_stat_unchecked(&fscache_n_op_pend);
44773 ret = 0;
44774 } else if (object->state == FSCACHE_OBJECT_DYING ||
44775 object->state == FSCACHE_OBJECT_LC_DYING ||
44776 object->state == FSCACHE_OBJECT_WITHDRAWING) {
44777 - fscache_stat(&fscache_n_op_rejected);
44778 + fscache_stat_unchecked(&fscache_n_op_rejected);
44779 ret = -ENOBUFS;
44780 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
44781 fscache_report_unexpected_submission(object, op, ostate);
44782 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
44783
44784 ret = -EBUSY;
44785 if (!list_empty(&op->pend_link)) {
44786 - fscache_stat(&fscache_n_op_cancelled);
44787 + fscache_stat_unchecked(&fscache_n_op_cancelled);
44788 list_del_init(&op->pend_link);
44789 object->n_ops--;
44790 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
44791 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
44792 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
44793 BUG();
44794
44795 - fscache_stat(&fscache_n_op_release);
44796 + fscache_stat_unchecked(&fscache_n_op_release);
44797
44798 if (op->release) {
44799 op->release(op);
44800 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
44801 * lock, and defer it otherwise */
44802 if (!spin_trylock(&object->lock)) {
44803 _debug("defer put");
44804 - fscache_stat(&fscache_n_op_deferred_release);
44805 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
44806
44807 cache = object->cache;
44808 spin_lock(&cache->op_gc_list_lock);
44809 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
44810
44811 _debug("GC DEFERRED REL OBJ%x OP%x",
44812 object->debug_id, op->debug_id);
44813 - fscache_stat(&fscache_n_op_gc);
44814 + fscache_stat_unchecked(&fscache_n_op_gc);
44815
44816 ASSERTCMP(atomic_read(&op->usage), ==, 0);
44817
44818 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
44819 index 3f7a59b..cf196cc 100644
44820 --- a/fs/fscache/page.c
44821 +++ b/fs/fscache/page.c
44822 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44823 val = radix_tree_lookup(&cookie->stores, page->index);
44824 if (!val) {
44825 rcu_read_unlock();
44826 - fscache_stat(&fscache_n_store_vmscan_not_storing);
44827 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
44828 __fscache_uncache_page(cookie, page);
44829 return true;
44830 }
44831 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44832 spin_unlock(&cookie->stores_lock);
44833
44834 if (xpage) {
44835 - fscache_stat(&fscache_n_store_vmscan_cancelled);
44836 - fscache_stat(&fscache_n_store_radix_deletes);
44837 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
44838 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44839 ASSERTCMP(xpage, ==, page);
44840 } else {
44841 - fscache_stat(&fscache_n_store_vmscan_gone);
44842 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
44843 }
44844
44845 wake_up_bit(&cookie->flags, 0);
44846 @@ -107,7 +107,7 @@ page_busy:
44847 /* we might want to wait here, but that could deadlock the allocator as
44848 * the work threads writing to the cache may all end up sleeping
44849 * on memory allocation */
44850 - fscache_stat(&fscache_n_store_vmscan_busy);
44851 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
44852 return false;
44853 }
44854 EXPORT_SYMBOL(__fscache_maybe_release_page);
44855 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
44856 FSCACHE_COOKIE_STORING_TAG);
44857 if (!radix_tree_tag_get(&cookie->stores, page->index,
44858 FSCACHE_COOKIE_PENDING_TAG)) {
44859 - fscache_stat(&fscache_n_store_radix_deletes);
44860 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44861 xpage = radix_tree_delete(&cookie->stores, page->index);
44862 }
44863 spin_unlock(&cookie->stores_lock);
44864 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
44865
44866 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
44867
44868 - fscache_stat(&fscache_n_attr_changed_calls);
44869 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
44870
44871 if (fscache_object_is_active(object)) {
44872 fscache_stat(&fscache_n_cop_attr_changed);
44873 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44874
44875 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44876
44877 - fscache_stat(&fscache_n_attr_changed);
44878 + fscache_stat_unchecked(&fscache_n_attr_changed);
44879
44880 op = kzalloc(sizeof(*op), GFP_KERNEL);
44881 if (!op) {
44882 - fscache_stat(&fscache_n_attr_changed_nomem);
44883 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
44884 _leave(" = -ENOMEM");
44885 return -ENOMEM;
44886 }
44887 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44888 if (fscache_submit_exclusive_op(object, op) < 0)
44889 goto nobufs;
44890 spin_unlock(&cookie->lock);
44891 - fscache_stat(&fscache_n_attr_changed_ok);
44892 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
44893 fscache_put_operation(op);
44894 _leave(" = 0");
44895 return 0;
44896 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44897 nobufs:
44898 spin_unlock(&cookie->lock);
44899 kfree(op);
44900 - fscache_stat(&fscache_n_attr_changed_nobufs);
44901 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
44902 _leave(" = %d", -ENOBUFS);
44903 return -ENOBUFS;
44904 }
44905 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
44906 /* allocate a retrieval operation and attempt to submit it */
44907 op = kzalloc(sizeof(*op), GFP_NOIO);
44908 if (!op) {
44909 - fscache_stat(&fscache_n_retrievals_nomem);
44910 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44911 return NULL;
44912 }
44913
44914 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
44915 return 0;
44916 }
44917
44918 - fscache_stat(&fscache_n_retrievals_wait);
44919 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
44920
44921 jif = jiffies;
44922 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
44923 fscache_wait_bit_interruptible,
44924 TASK_INTERRUPTIBLE) != 0) {
44925 - fscache_stat(&fscache_n_retrievals_intr);
44926 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
44927 _leave(" = -ERESTARTSYS");
44928 return -ERESTARTSYS;
44929 }
44930 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
44931 */
44932 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44933 struct fscache_retrieval *op,
44934 - atomic_t *stat_op_waits,
44935 - atomic_t *stat_object_dead)
44936 + atomic_unchecked_t *stat_op_waits,
44937 + atomic_unchecked_t *stat_object_dead)
44938 {
44939 int ret;
44940
44941 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44942 goto check_if_dead;
44943
44944 _debug(">>> WT");
44945 - fscache_stat(stat_op_waits);
44946 + fscache_stat_unchecked(stat_op_waits);
44947 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
44948 fscache_wait_bit_interruptible,
44949 TASK_INTERRUPTIBLE) < 0) {
44950 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
44951
44952 check_if_dead:
44953 if (unlikely(fscache_object_is_dead(object))) {
44954 - fscache_stat(stat_object_dead);
44955 + fscache_stat_unchecked(stat_object_dead);
44956 return -ENOBUFS;
44957 }
44958 return 0;
44959 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44960
44961 _enter("%p,%p,,,", cookie, page);
44962
44963 - fscache_stat(&fscache_n_retrievals);
44964 + fscache_stat_unchecked(&fscache_n_retrievals);
44965
44966 if (hlist_empty(&cookie->backing_objects))
44967 goto nobufs;
44968 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44969 goto nobufs_unlock;
44970 spin_unlock(&cookie->lock);
44971
44972 - fscache_stat(&fscache_n_retrieval_ops);
44973 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
44974
44975 /* pin the netfs read context in case we need to do the actual netfs
44976 * read because we've encountered a cache read failure */
44977 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
44978
44979 error:
44980 if (ret == -ENOMEM)
44981 - fscache_stat(&fscache_n_retrievals_nomem);
44982 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44983 else if (ret == -ERESTARTSYS)
44984 - fscache_stat(&fscache_n_retrievals_intr);
44985 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
44986 else if (ret == -ENODATA)
44987 - fscache_stat(&fscache_n_retrievals_nodata);
44988 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
44989 else if (ret < 0)
44990 - fscache_stat(&fscache_n_retrievals_nobufs);
44991 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
44992 else
44993 - fscache_stat(&fscache_n_retrievals_ok);
44994 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
44995
44996 fscache_put_retrieval(op);
44997 _leave(" = %d", ret);
44998 @@ -429,7 +429,7 @@ nobufs_unlock:
44999 spin_unlock(&cookie->lock);
45000 kfree(op);
45001 nobufs:
45002 - fscache_stat(&fscache_n_retrievals_nobufs);
45003 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45004 _leave(" = -ENOBUFS");
45005 return -ENOBUFS;
45006 }
45007 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45008
45009 _enter("%p,,%d,,,", cookie, *nr_pages);
45010
45011 - fscache_stat(&fscache_n_retrievals);
45012 + fscache_stat_unchecked(&fscache_n_retrievals);
45013
45014 if (hlist_empty(&cookie->backing_objects))
45015 goto nobufs;
45016 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45017 goto nobufs_unlock;
45018 spin_unlock(&cookie->lock);
45019
45020 - fscache_stat(&fscache_n_retrieval_ops);
45021 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45022
45023 /* pin the netfs read context in case we need to do the actual netfs
45024 * read because we've encountered a cache read failure */
45025 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45026
45027 error:
45028 if (ret == -ENOMEM)
45029 - fscache_stat(&fscache_n_retrievals_nomem);
45030 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45031 else if (ret == -ERESTARTSYS)
45032 - fscache_stat(&fscache_n_retrievals_intr);
45033 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45034 else if (ret == -ENODATA)
45035 - fscache_stat(&fscache_n_retrievals_nodata);
45036 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45037 else if (ret < 0)
45038 - fscache_stat(&fscache_n_retrievals_nobufs);
45039 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45040 else
45041 - fscache_stat(&fscache_n_retrievals_ok);
45042 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45043
45044 fscache_put_retrieval(op);
45045 _leave(" = %d", ret);
45046 @@ -545,7 +545,7 @@ nobufs_unlock:
45047 spin_unlock(&cookie->lock);
45048 kfree(op);
45049 nobufs:
45050 - fscache_stat(&fscache_n_retrievals_nobufs);
45051 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45052 _leave(" = -ENOBUFS");
45053 return -ENOBUFS;
45054 }
45055 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45056
45057 _enter("%p,%p,,,", cookie, page);
45058
45059 - fscache_stat(&fscache_n_allocs);
45060 + fscache_stat_unchecked(&fscache_n_allocs);
45061
45062 if (hlist_empty(&cookie->backing_objects))
45063 goto nobufs;
45064 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45065 goto nobufs_unlock;
45066 spin_unlock(&cookie->lock);
45067
45068 - fscache_stat(&fscache_n_alloc_ops);
45069 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45070
45071 ret = fscache_wait_for_retrieval_activation(
45072 object, op,
45073 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45074
45075 error:
45076 if (ret == -ERESTARTSYS)
45077 - fscache_stat(&fscache_n_allocs_intr);
45078 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45079 else if (ret < 0)
45080 - fscache_stat(&fscache_n_allocs_nobufs);
45081 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45082 else
45083 - fscache_stat(&fscache_n_allocs_ok);
45084 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45085
45086 fscache_put_retrieval(op);
45087 _leave(" = %d", ret);
45088 @@ -625,7 +625,7 @@ nobufs_unlock:
45089 spin_unlock(&cookie->lock);
45090 kfree(op);
45091 nobufs:
45092 - fscache_stat(&fscache_n_allocs_nobufs);
45093 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45094 _leave(" = -ENOBUFS");
45095 return -ENOBUFS;
45096 }
45097 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45098
45099 spin_lock(&cookie->stores_lock);
45100
45101 - fscache_stat(&fscache_n_store_calls);
45102 + fscache_stat_unchecked(&fscache_n_store_calls);
45103
45104 /* find a page to store */
45105 page = NULL;
45106 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45107 page = results[0];
45108 _debug("gang %d [%lx]", n, page->index);
45109 if (page->index > op->store_limit) {
45110 - fscache_stat(&fscache_n_store_pages_over_limit);
45111 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45112 goto superseded;
45113 }
45114
45115 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45116 spin_unlock(&cookie->stores_lock);
45117 spin_unlock(&object->lock);
45118
45119 - fscache_stat(&fscache_n_store_pages);
45120 + fscache_stat_unchecked(&fscache_n_store_pages);
45121 fscache_stat(&fscache_n_cop_write_page);
45122 ret = object->cache->ops->write_page(op, page);
45123 fscache_stat_d(&fscache_n_cop_write_page);
45124 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45125 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45126 ASSERT(PageFsCache(page));
45127
45128 - fscache_stat(&fscache_n_stores);
45129 + fscache_stat_unchecked(&fscache_n_stores);
45130
45131 op = kzalloc(sizeof(*op), GFP_NOIO);
45132 if (!op)
45133 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45134 spin_unlock(&cookie->stores_lock);
45135 spin_unlock(&object->lock);
45136
45137 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45138 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45139 op->store_limit = object->store_limit;
45140
45141 if (fscache_submit_op(object, &op->op) < 0)
45142 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45143
45144 spin_unlock(&cookie->lock);
45145 radix_tree_preload_end();
45146 - fscache_stat(&fscache_n_store_ops);
45147 - fscache_stat(&fscache_n_stores_ok);
45148 + fscache_stat_unchecked(&fscache_n_store_ops);
45149 + fscache_stat_unchecked(&fscache_n_stores_ok);
45150
45151 /* the work queue now carries its own ref on the object */
45152 fscache_put_operation(&op->op);
45153 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45154 return 0;
45155
45156 already_queued:
45157 - fscache_stat(&fscache_n_stores_again);
45158 + fscache_stat_unchecked(&fscache_n_stores_again);
45159 already_pending:
45160 spin_unlock(&cookie->stores_lock);
45161 spin_unlock(&object->lock);
45162 spin_unlock(&cookie->lock);
45163 radix_tree_preload_end();
45164 kfree(op);
45165 - fscache_stat(&fscache_n_stores_ok);
45166 + fscache_stat_unchecked(&fscache_n_stores_ok);
45167 _leave(" = 0");
45168 return 0;
45169
45170 @@ -851,14 +851,14 @@ nobufs:
45171 spin_unlock(&cookie->lock);
45172 radix_tree_preload_end();
45173 kfree(op);
45174 - fscache_stat(&fscache_n_stores_nobufs);
45175 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45176 _leave(" = -ENOBUFS");
45177 return -ENOBUFS;
45178
45179 nomem_free:
45180 kfree(op);
45181 nomem:
45182 - fscache_stat(&fscache_n_stores_oom);
45183 + fscache_stat_unchecked(&fscache_n_stores_oom);
45184 _leave(" = -ENOMEM");
45185 return -ENOMEM;
45186 }
45187 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45188 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45189 ASSERTCMP(page, !=, NULL);
45190
45191 - fscache_stat(&fscache_n_uncaches);
45192 + fscache_stat_unchecked(&fscache_n_uncaches);
45193
45194 /* cache withdrawal may beat us to it */
45195 if (!PageFsCache(page))
45196 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45197 unsigned long loop;
45198
45199 #ifdef CONFIG_FSCACHE_STATS
45200 - atomic_add(pagevec->nr, &fscache_n_marks);
45201 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45202 #endif
45203
45204 for (loop = 0; loop < pagevec->nr; loop++) {
45205 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45206 index 4765190..2a067f2 100644
45207 --- a/fs/fscache/stats.c
45208 +++ b/fs/fscache/stats.c
45209 @@ -18,95 +18,95 @@
45210 /*
45211 * operation counters
45212 */
45213 -atomic_t fscache_n_op_pend;
45214 -atomic_t fscache_n_op_run;
45215 -atomic_t fscache_n_op_enqueue;
45216 -atomic_t fscache_n_op_requeue;
45217 -atomic_t fscache_n_op_deferred_release;
45218 -atomic_t fscache_n_op_release;
45219 -atomic_t fscache_n_op_gc;
45220 -atomic_t fscache_n_op_cancelled;
45221 -atomic_t fscache_n_op_rejected;
45222 +atomic_unchecked_t fscache_n_op_pend;
45223 +atomic_unchecked_t fscache_n_op_run;
45224 +atomic_unchecked_t fscache_n_op_enqueue;
45225 +atomic_unchecked_t fscache_n_op_requeue;
45226 +atomic_unchecked_t fscache_n_op_deferred_release;
45227 +atomic_unchecked_t fscache_n_op_release;
45228 +atomic_unchecked_t fscache_n_op_gc;
45229 +atomic_unchecked_t fscache_n_op_cancelled;
45230 +atomic_unchecked_t fscache_n_op_rejected;
45231
45232 -atomic_t fscache_n_attr_changed;
45233 -atomic_t fscache_n_attr_changed_ok;
45234 -atomic_t fscache_n_attr_changed_nobufs;
45235 -atomic_t fscache_n_attr_changed_nomem;
45236 -atomic_t fscache_n_attr_changed_calls;
45237 +atomic_unchecked_t fscache_n_attr_changed;
45238 +atomic_unchecked_t fscache_n_attr_changed_ok;
45239 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45240 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45241 +atomic_unchecked_t fscache_n_attr_changed_calls;
45242
45243 -atomic_t fscache_n_allocs;
45244 -atomic_t fscache_n_allocs_ok;
45245 -atomic_t fscache_n_allocs_wait;
45246 -atomic_t fscache_n_allocs_nobufs;
45247 -atomic_t fscache_n_allocs_intr;
45248 -atomic_t fscache_n_allocs_object_dead;
45249 -atomic_t fscache_n_alloc_ops;
45250 -atomic_t fscache_n_alloc_op_waits;
45251 +atomic_unchecked_t fscache_n_allocs;
45252 +atomic_unchecked_t fscache_n_allocs_ok;
45253 +atomic_unchecked_t fscache_n_allocs_wait;
45254 +atomic_unchecked_t fscache_n_allocs_nobufs;
45255 +atomic_unchecked_t fscache_n_allocs_intr;
45256 +atomic_unchecked_t fscache_n_allocs_object_dead;
45257 +atomic_unchecked_t fscache_n_alloc_ops;
45258 +atomic_unchecked_t fscache_n_alloc_op_waits;
45259
45260 -atomic_t fscache_n_retrievals;
45261 -atomic_t fscache_n_retrievals_ok;
45262 -atomic_t fscache_n_retrievals_wait;
45263 -atomic_t fscache_n_retrievals_nodata;
45264 -atomic_t fscache_n_retrievals_nobufs;
45265 -atomic_t fscache_n_retrievals_intr;
45266 -atomic_t fscache_n_retrievals_nomem;
45267 -atomic_t fscache_n_retrievals_object_dead;
45268 -atomic_t fscache_n_retrieval_ops;
45269 -atomic_t fscache_n_retrieval_op_waits;
45270 +atomic_unchecked_t fscache_n_retrievals;
45271 +atomic_unchecked_t fscache_n_retrievals_ok;
45272 +atomic_unchecked_t fscache_n_retrievals_wait;
45273 +atomic_unchecked_t fscache_n_retrievals_nodata;
45274 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45275 +atomic_unchecked_t fscache_n_retrievals_intr;
45276 +atomic_unchecked_t fscache_n_retrievals_nomem;
45277 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45278 +atomic_unchecked_t fscache_n_retrieval_ops;
45279 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45280
45281 -atomic_t fscache_n_stores;
45282 -atomic_t fscache_n_stores_ok;
45283 -atomic_t fscache_n_stores_again;
45284 -atomic_t fscache_n_stores_nobufs;
45285 -atomic_t fscache_n_stores_oom;
45286 -atomic_t fscache_n_store_ops;
45287 -atomic_t fscache_n_store_calls;
45288 -atomic_t fscache_n_store_pages;
45289 -atomic_t fscache_n_store_radix_deletes;
45290 -atomic_t fscache_n_store_pages_over_limit;
45291 +atomic_unchecked_t fscache_n_stores;
45292 +atomic_unchecked_t fscache_n_stores_ok;
45293 +atomic_unchecked_t fscache_n_stores_again;
45294 +atomic_unchecked_t fscache_n_stores_nobufs;
45295 +atomic_unchecked_t fscache_n_stores_oom;
45296 +atomic_unchecked_t fscache_n_store_ops;
45297 +atomic_unchecked_t fscache_n_store_calls;
45298 +atomic_unchecked_t fscache_n_store_pages;
45299 +atomic_unchecked_t fscache_n_store_radix_deletes;
45300 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45301
45302 -atomic_t fscache_n_store_vmscan_not_storing;
45303 -atomic_t fscache_n_store_vmscan_gone;
45304 -atomic_t fscache_n_store_vmscan_busy;
45305 -atomic_t fscache_n_store_vmscan_cancelled;
45306 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45307 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45308 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45309 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45310
45311 -atomic_t fscache_n_marks;
45312 -atomic_t fscache_n_uncaches;
45313 +atomic_unchecked_t fscache_n_marks;
45314 +atomic_unchecked_t fscache_n_uncaches;
45315
45316 -atomic_t fscache_n_acquires;
45317 -atomic_t fscache_n_acquires_null;
45318 -atomic_t fscache_n_acquires_no_cache;
45319 -atomic_t fscache_n_acquires_ok;
45320 -atomic_t fscache_n_acquires_nobufs;
45321 -atomic_t fscache_n_acquires_oom;
45322 +atomic_unchecked_t fscache_n_acquires;
45323 +atomic_unchecked_t fscache_n_acquires_null;
45324 +atomic_unchecked_t fscache_n_acquires_no_cache;
45325 +atomic_unchecked_t fscache_n_acquires_ok;
45326 +atomic_unchecked_t fscache_n_acquires_nobufs;
45327 +atomic_unchecked_t fscache_n_acquires_oom;
45328
45329 -atomic_t fscache_n_updates;
45330 -atomic_t fscache_n_updates_null;
45331 -atomic_t fscache_n_updates_run;
45332 +atomic_unchecked_t fscache_n_updates;
45333 +atomic_unchecked_t fscache_n_updates_null;
45334 +atomic_unchecked_t fscache_n_updates_run;
45335
45336 -atomic_t fscache_n_relinquishes;
45337 -atomic_t fscache_n_relinquishes_null;
45338 -atomic_t fscache_n_relinquishes_waitcrt;
45339 -atomic_t fscache_n_relinquishes_retire;
45340 +atomic_unchecked_t fscache_n_relinquishes;
45341 +atomic_unchecked_t fscache_n_relinquishes_null;
45342 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45343 +atomic_unchecked_t fscache_n_relinquishes_retire;
45344
45345 -atomic_t fscache_n_cookie_index;
45346 -atomic_t fscache_n_cookie_data;
45347 -atomic_t fscache_n_cookie_special;
45348 +atomic_unchecked_t fscache_n_cookie_index;
45349 +atomic_unchecked_t fscache_n_cookie_data;
45350 +atomic_unchecked_t fscache_n_cookie_special;
45351
45352 -atomic_t fscache_n_object_alloc;
45353 -atomic_t fscache_n_object_no_alloc;
45354 -atomic_t fscache_n_object_lookups;
45355 -atomic_t fscache_n_object_lookups_negative;
45356 -atomic_t fscache_n_object_lookups_positive;
45357 -atomic_t fscache_n_object_lookups_timed_out;
45358 -atomic_t fscache_n_object_created;
45359 -atomic_t fscache_n_object_avail;
45360 -atomic_t fscache_n_object_dead;
45361 +atomic_unchecked_t fscache_n_object_alloc;
45362 +atomic_unchecked_t fscache_n_object_no_alloc;
45363 +atomic_unchecked_t fscache_n_object_lookups;
45364 +atomic_unchecked_t fscache_n_object_lookups_negative;
45365 +atomic_unchecked_t fscache_n_object_lookups_positive;
45366 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45367 +atomic_unchecked_t fscache_n_object_created;
45368 +atomic_unchecked_t fscache_n_object_avail;
45369 +atomic_unchecked_t fscache_n_object_dead;
45370
45371 -atomic_t fscache_n_checkaux_none;
45372 -atomic_t fscache_n_checkaux_okay;
45373 -atomic_t fscache_n_checkaux_update;
45374 -atomic_t fscache_n_checkaux_obsolete;
45375 +atomic_unchecked_t fscache_n_checkaux_none;
45376 +atomic_unchecked_t fscache_n_checkaux_okay;
45377 +atomic_unchecked_t fscache_n_checkaux_update;
45378 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45379
45380 atomic_t fscache_n_cop_alloc_object;
45381 atomic_t fscache_n_cop_lookup_object;
45382 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45383 seq_puts(m, "FS-Cache statistics\n");
45384
45385 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45386 - atomic_read(&fscache_n_cookie_index),
45387 - atomic_read(&fscache_n_cookie_data),
45388 - atomic_read(&fscache_n_cookie_special));
45389 + atomic_read_unchecked(&fscache_n_cookie_index),
45390 + atomic_read_unchecked(&fscache_n_cookie_data),
45391 + atomic_read_unchecked(&fscache_n_cookie_special));
45392
45393 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45394 - atomic_read(&fscache_n_object_alloc),
45395 - atomic_read(&fscache_n_object_no_alloc),
45396 - atomic_read(&fscache_n_object_avail),
45397 - atomic_read(&fscache_n_object_dead));
45398 + atomic_read_unchecked(&fscache_n_object_alloc),
45399 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45400 + atomic_read_unchecked(&fscache_n_object_avail),
45401 + atomic_read_unchecked(&fscache_n_object_dead));
45402 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45403 - atomic_read(&fscache_n_checkaux_none),
45404 - atomic_read(&fscache_n_checkaux_okay),
45405 - atomic_read(&fscache_n_checkaux_update),
45406 - atomic_read(&fscache_n_checkaux_obsolete));
45407 + atomic_read_unchecked(&fscache_n_checkaux_none),
45408 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45409 + atomic_read_unchecked(&fscache_n_checkaux_update),
45410 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45411
45412 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45413 - atomic_read(&fscache_n_marks),
45414 - atomic_read(&fscache_n_uncaches));
45415 + atomic_read_unchecked(&fscache_n_marks),
45416 + atomic_read_unchecked(&fscache_n_uncaches));
45417
45418 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45419 " oom=%u\n",
45420 - atomic_read(&fscache_n_acquires),
45421 - atomic_read(&fscache_n_acquires_null),
45422 - atomic_read(&fscache_n_acquires_no_cache),
45423 - atomic_read(&fscache_n_acquires_ok),
45424 - atomic_read(&fscache_n_acquires_nobufs),
45425 - atomic_read(&fscache_n_acquires_oom));
45426 + atomic_read_unchecked(&fscache_n_acquires),
45427 + atomic_read_unchecked(&fscache_n_acquires_null),
45428 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45429 + atomic_read_unchecked(&fscache_n_acquires_ok),
45430 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45431 + atomic_read_unchecked(&fscache_n_acquires_oom));
45432
45433 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45434 - atomic_read(&fscache_n_object_lookups),
45435 - atomic_read(&fscache_n_object_lookups_negative),
45436 - atomic_read(&fscache_n_object_lookups_positive),
45437 - atomic_read(&fscache_n_object_created),
45438 - atomic_read(&fscache_n_object_lookups_timed_out));
45439 + atomic_read_unchecked(&fscache_n_object_lookups),
45440 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45441 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45442 + atomic_read_unchecked(&fscache_n_object_created),
45443 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45444
45445 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45446 - atomic_read(&fscache_n_updates),
45447 - atomic_read(&fscache_n_updates_null),
45448 - atomic_read(&fscache_n_updates_run));
45449 + atomic_read_unchecked(&fscache_n_updates),
45450 + atomic_read_unchecked(&fscache_n_updates_null),
45451 + atomic_read_unchecked(&fscache_n_updates_run));
45452
45453 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45454 - atomic_read(&fscache_n_relinquishes),
45455 - atomic_read(&fscache_n_relinquishes_null),
45456 - atomic_read(&fscache_n_relinquishes_waitcrt),
45457 - atomic_read(&fscache_n_relinquishes_retire));
45458 + atomic_read_unchecked(&fscache_n_relinquishes),
45459 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45460 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45461 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45462
45463 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45464 - atomic_read(&fscache_n_attr_changed),
45465 - atomic_read(&fscache_n_attr_changed_ok),
45466 - atomic_read(&fscache_n_attr_changed_nobufs),
45467 - atomic_read(&fscache_n_attr_changed_nomem),
45468 - atomic_read(&fscache_n_attr_changed_calls));
45469 + atomic_read_unchecked(&fscache_n_attr_changed),
45470 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45471 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45472 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45473 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45474
45475 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45476 - atomic_read(&fscache_n_allocs),
45477 - atomic_read(&fscache_n_allocs_ok),
45478 - atomic_read(&fscache_n_allocs_wait),
45479 - atomic_read(&fscache_n_allocs_nobufs),
45480 - atomic_read(&fscache_n_allocs_intr));
45481 + atomic_read_unchecked(&fscache_n_allocs),
45482 + atomic_read_unchecked(&fscache_n_allocs_ok),
45483 + atomic_read_unchecked(&fscache_n_allocs_wait),
45484 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45485 + atomic_read_unchecked(&fscache_n_allocs_intr));
45486 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45487 - atomic_read(&fscache_n_alloc_ops),
45488 - atomic_read(&fscache_n_alloc_op_waits),
45489 - atomic_read(&fscache_n_allocs_object_dead));
45490 + atomic_read_unchecked(&fscache_n_alloc_ops),
45491 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45492 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45493
45494 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45495 " int=%u oom=%u\n",
45496 - atomic_read(&fscache_n_retrievals),
45497 - atomic_read(&fscache_n_retrievals_ok),
45498 - atomic_read(&fscache_n_retrievals_wait),
45499 - atomic_read(&fscache_n_retrievals_nodata),
45500 - atomic_read(&fscache_n_retrievals_nobufs),
45501 - atomic_read(&fscache_n_retrievals_intr),
45502 - atomic_read(&fscache_n_retrievals_nomem));
45503 + atomic_read_unchecked(&fscache_n_retrievals),
45504 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45505 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45506 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45507 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45508 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45509 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45510 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45511 - atomic_read(&fscache_n_retrieval_ops),
45512 - atomic_read(&fscache_n_retrieval_op_waits),
45513 - atomic_read(&fscache_n_retrievals_object_dead));
45514 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45515 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45516 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45517
45518 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45519 - atomic_read(&fscache_n_stores),
45520 - atomic_read(&fscache_n_stores_ok),
45521 - atomic_read(&fscache_n_stores_again),
45522 - atomic_read(&fscache_n_stores_nobufs),
45523 - atomic_read(&fscache_n_stores_oom));
45524 + atomic_read_unchecked(&fscache_n_stores),
45525 + atomic_read_unchecked(&fscache_n_stores_ok),
45526 + atomic_read_unchecked(&fscache_n_stores_again),
45527 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45528 + atomic_read_unchecked(&fscache_n_stores_oom));
45529 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45530 - atomic_read(&fscache_n_store_ops),
45531 - atomic_read(&fscache_n_store_calls),
45532 - atomic_read(&fscache_n_store_pages),
45533 - atomic_read(&fscache_n_store_radix_deletes),
45534 - atomic_read(&fscache_n_store_pages_over_limit));
45535 + atomic_read_unchecked(&fscache_n_store_ops),
45536 + atomic_read_unchecked(&fscache_n_store_calls),
45537 + atomic_read_unchecked(&fscache_n_store_pages),
45538 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45539 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45540
45541 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45542 - atomic_read(&fscache_n_store_vmscan_not_storing),
45543 - atomic_read(&fscache_n_store_vmscan_gone),
45544 - atomic_read(&fscache_n_store_vmscan_busy),
45545 - atomic_read(&fscache_n_store_vmscan_cancelled));
45546 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45547 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45548 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45549 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45550
45551 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45552 - atomic_read(&fscache_n_op_pend),
45553 - atomic_read(&fscache_n_op_run),
45554 - atomic_read(&fscache_n_op_enqueue),
45555 - atomic_read(&fscache_n_op_cancelled),
45556 - atomic_read(&fscache_n_op_rejected));
45557 + atomic_read_unchecked(&fscache_n_op_pend),
45558 + atomic_read_unchecked(&fscache_n_op_run),
45559 + atomic_read_unchecked(&fscache_n_op_enqueue),
45560 + atomic_read_unchecked(&fscache_n_op_cancelled),
45561 + atomic_read_unchecked(&fscache_n_op_rejected));
45562 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45563 - atomic_read(&fscache_n_op_deferred_release),
45564 - atomic_read(&fscache_n_op_release),
45565 - atomic_read(&fscache_n_op_gc));
45566 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45567 + atomic_read_unchecked(&fscache_n_op_release),
45568 + atomic_read_unchecked(&fscache_n_op_gc));
45569
45570 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45571 atomic_read(&fscache_n_cop_alloc_object),
45572 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45573 index 3426521..3b75162 100644
45574 --- a/fs/fuse/cuse.c
45575 +++ b/fs/fuse/cuse.c
45576 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
45577 INIT_LIST_HEAD(&cuse_conntbl[i]);
45578
45579 /* inherit and extend fuse_dev_operations */
45580 - cuse_channel_fops = fuse_dev_operations;
45581 - cuse_channel_fops.owner = THIS_MODULE;
45582 - cuse_channel_fops.open = cuse_channel_open;
45583 - cuse_channel_fops.release = cuse_channel_release;
45584 + pax_open_kernel();
45585 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45586 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45587 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45588 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45589 + pax_close_kernel();
45590
45591 cuse_class = class_create(THIS_MODULE, "cuse");
45592 if (IS_ERR(cuse_class))
45593 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45594 index 2aaf3ea..8e50863 100644
45595 --- a/fs/fuse/dev.c
45596 +++ b/fs/fuse/dev.c
45597 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45598 ret = 0;
45599 pipe_lock(pipe);
45600
45601 - if (!pipe->readers) {
45602 + if (!atomic_read(&pipe->readers)) {
45603 send_sig(SIGPIPE, current, 0);
45604 if (!ret)
45605 ret = -EPIPE;
45606 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45607 index 9f63e49..d8a64c0 100644
45608 --- a/fs/fuse/dir.c
45609 +++ b/fs/fuse/dir.c
45610 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
45611 return link;
45612 }
45613
45614 -static void free_link(char *link)
45615 +static void free_link(const char *link)
45616 {
45617 if (!IS_ERR(link))
45618 free_page((unsigned long) link);
45619 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45620 index cfd4959..a780959 100644
45621 --- a/fs/gfs2/inode.c
45622 +++ b/fs/gfs2/inode.c
45623 @@ -1490,7 +1490,7 @@ out:
45624
45625 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45626 {
45627 - char *s = nd_get_link(nd);
45628 + const char *s = nd_get_link(nd);
45629 if (!IS_ERR(s))
45630 kfree(s);
45631 }
45632 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45633 index 0be5a78..9cfb853 100644
45634 --- a/fs/hugetlbfs/inode.c
45635 +++ b/fs/hugetlbfs/inode.c
45636 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45637 .kill_sb = kill_litter_super,
45638 };
45639
45640 -static struct vfsmount *hugetlbfs_vfsmount;
45641 +struct vfsmount *hugetlbfs_vfsmount;
45642
45643 static int can_do_hugetlb_shm(void)
45644 {
45645 diff --git a/fs/inode.c b/fs/inode.c
45646 index ee4e66b..0451521 100644
45647 --- a/fs/inode.c
45648 +++ b/fs/inode.c
45649 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
45650
45651 #ifdef CONFIG_SMP
45652 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
45653 - static atomic_t shared_last_ino;
45654 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
45655 + static atomic_unchecked_t shared_last_ino;
45656 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
45657
45658 res = next - LAST_INO_BATCH;
45659 }
45660 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
45661 index e513f19..2ab1351 100644
45662 --- a/fs/jffs2/erase.c
45663 +++ b/fs/jffs2/erase.c
45664 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
45665 struct jffs2_unknown_node marker = {
45666 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
45667 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45668 - .totlen = cpu_to_je32(c->cleanmarker_size)
45669 + .totlen = cpu_to_je32(c->cleanmarker_size),
45670 + .hdr_crc = cpu_to_je32(0)
45671 };
45672
45673 jffs2_prealloc_raw_node_refs(c, jeb, 1);
45674 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
45675 index b09e51d..e482afa 100644
45676 --- a/fs/jffs2/wbuf.c
45677 +++ b/fs/jffs2/wbuf.c
45678 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
45679 {
45680 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
45681 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45682 - .totlen = constant_cpu_to_je32(8)
45683 + .totlen = constant_cpu_to_je32(8),
45684 + .hdr_crc = constant_cpu_to_je32(0)
45685 };
45686
45687 /*
45688 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
45689 index a44eff0..462e07d 100644
45690 --- a/fs/jfs/super.c
45691 +++ b/fs/jfs/super.c
45692 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
45693
45694 jfs_inode_cachep =
45695 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
45696 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
45697 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
45698 init_once);
45699 if (jfs_inode_cachep == NULL)
45700 return -ENOMEM;
45701 diff --git a/fs/libfs.c b/fs/libfs.c
45702 index f6d411e..e82a08d 100644
45703 --- a/fs/libfs.c
45704 +++ b/fs/libfs.c
45705 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45706
45707 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
45708 struct dentry *next;
45709 + char d_name[sizeof(next->d_iname)];
45710 + const unsigned char *name;
45711 +
45712 next = list_entry(p, struct dentry, d_u.d_child);
45713 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
45714 if (!simple_positive(next)) {
45715 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45716
45717 spin_unlock(&next->d_lock);
45718 spin_unlock(&dentry->d_lock);
45719 - if (filldir(dirent, next->d_name.name,
45720 + name = next->d_name.name;
45721 + if (name == next->d_iname) {
45722 + memcpy(d_name, name, next->d_name.len);
45723 + name = d_name;
45724 + }
45725 + if (filldir(dirent, name,
45726 next->d_name.len, filp->f_pos,
45727 next->d_inode->i_ino,
45728 dt_type(next->d_inode)) < 0)
45729 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
45730 index 8392cb8..80d6193 100644
45731 --- a/fs/lockd/clntproc.c
45732 +++ b/fs/lockd/clntproc.c
45733 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
45734 /*
45735 * Cookie counter for NLM requests
45736 */
45737 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
45738 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
45739
45740 void nlmclnt_next_cookie(struct nlm_cookie *c)
45741 {
45742 - u32 cookie = atomic_inc_return(&nlm_cookie);
45743 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
45744
45745 memcpy(c->data, &cookie, 4);
45746 c->len=4;
45747 diff --git a/fs/locks.c b/fs/locks.c
45748 index 637694b..f84a121 100644
45749 --- a/fs/locks.c
45750 +++ b/fs/locks.c
45751 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
45752 return;
45753
45754 if (filp->f_op && filp->f_op->flock) {
45755 - struct file_lock fl = {
45756 + struct file_lock flock = {
45757 .fl_pid = current->tgid,
45758 .fl_file = filp,
45759 .fl_flags = FL_FLOCK,
45760 .fl_type = F_UNLCK,
45761 .fl_end = OFFSET_MAX,
45762 };
45763 - filp->f_op->flock(filp, F_SETLKW, &fl);
45764 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
45765 - fl.fl_ops->fl_release_private(&fl);
45766 + filp->f_op->flock(filp, F_SETLKW, &flock);
45767 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
45768 + flock.fl_ops->fl_release_private(&flock);
45769 }
45770
45771 lock_flocks();
45772 diff --git a/fs/namei.c b/fs/namei.c
45773 index 744e942..24ef47f 100644
45774 --- a/fs/namei.c
45775 +++ b/fs/namei.c
45776 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
45777 if (ret != -EACCES)
45778 return ret;
45779
45780 +#ifdef CONFIG_GRKERNSEC
45781 + /* we'll block if we have to log due to a denied capability use */
45782 + if (mask & MAY_NOT_BLOCK)
45783 + return -ECHILD;
45784 +#endif
45785 +
45786 if (S_ISDIR(inode->i_mode)) {
45787 /* DACs are overridable for directories */
45788 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45789 - return 0;
45790 if (!(mask & MAY_WRITE))
45791 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45792 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45793 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45794 return 0;
45795 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45796 + return 0;
45797 return -EACCES;
45798 }
45799 /*
45800 + * Searching includes executable on directories, else just read.
45801 + */
45802 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45803 + if (mask == MAY_READ)
45804 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45805 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45806 + return 0;
45807 +
45808 + /*
45809 * Read/write DACs are always overridable.
45810 * Executable DACs are overridable when there is
45811 * at least one exec bit set.
45812 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
45813 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45814 return 0;
45815
45816 - /*
45817 - * Searching includes executable on directories, else just read.
45818 - */
45819 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45820 - if (mask == MAY_READ)
45821 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45822 - return 0;
45823 -
45824 return -EACCES;
45825 }
45826
45827 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
45828 return error;
45829 }
45830
45831 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
45832 + dentry->d_inode, dentry, nd->path.mnt)) {
45833 + error = -EACCES;
45834 + *p = ERR_PTR(error); /* no ->put_link(), please */
45835 + path_put(&nd->path);
45836 + return error;
45837 + }
45838 +
45839 nd->last_type = LAST_BIND;
45840 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
45841 error = PTR_ERR(*p);
45842 if (!IS_ERR(*p)) {
45843 - char *s = nd_get_link(nd);
45844 + const char *s = nd_get_link(nd);
45845 error = 0;
45846 if (s)
45847 error = __vfs_follow_link(nd, s);
45848 @@ -1624,6 +1640,21 @@ static int path_lookupat(int dfd, const char *name,
45849 if (!err)
45850 err = complete_walk(nd);
45851
45852 + if (!(nd->flags & LOOKUP_PARENT)) {
45853 +#ifdef CONFIG_GRKERNSEC
45854 + if (flags & LOOKUP_RCU) {
45855 + if (!err)
45856 + path_put(&nd->path);
45857 + err = -ECHILD;
45858 + } else
45859 +#endif
45860 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45861 + if (!err)
45862 + path_put(&nd->path);
45863 + err = -ENOENT;
45864 + }
45865 + }
45866 +
45867 if (!err && nd->flags & LOOKUP_DIRECTORY) {
45868 if (!nd->inode->i_op->lookup) {
45869 path_put(&nd->path);
45870 @@ -1651,6 +1682,15 @@ static int do_path_lookup(int dfd, const char *name,
45871 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
45872
45873 if (likely(!retval)) {
45874 + if (*name != '/' && nd->path.dentry && nd->inode) {
45875 +#ifdef CONFIG_GRKERNSEC
45876 + if (flags & LOOKUP_RCU)
45877 + return -ECHILD;
45878 +#endif
45879 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
45880 + return -ENOENT;
45881 + }
45882 +
45883 if (unlikely(!audit_dummy_context())) {
45884 if (nd->path.dentry && nd->inode)
45885 audit_inode(name, nd->path.dentry);
45886 @@ -2048,6 +2088,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
45887 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
45888 return -EPERM;
45889
45890 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
45891 + return -EPERM;
45892 + if (gr_handle_rawio(inode))
45893 + return -EPERM;
45894 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
45895 + return -EACCES;
45896 +
45897 return 0;
45898 }
45899
45900 @@ -2109,6 +2156,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45901 error = complete_walk(nd);
45902 if (error)
45903 return ERR_PTR(error);
45904 +#ifdef CONFIG_GRKERNSEC
45905 + if (nd->flags & LOOKUP_RCU) {
45906 + error = -ECHILD;
45907 + goto exit;
45908 + }
45909 +#endif
45910 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45911 + error = -ENOENT;
45912 + goto exit;
45913 + }
45914 audit_inode(pathname, nd->path.dentry);
45915 if (open_flag & O_CREAT) {
45916 error = -EISDIR;
45917 @@ -2119,6 +2176,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45918 error = complete_walk(nd);
45919 if (error)
45920 return ERR_PTR(error);
45921 +#ifdef CONFIG_GRKERNSEC
45922 + if (nd->flags & LOOKUP_RCU) {
45923 + error = -ECHILD;
45924 + goto exit;
45925 + }
45926 +#endif
45927 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
45928 + error = -ENOENT;
45929 + goto exit;
45930 + }
45931 audit_inode(pathname, dir);
45932 goto ok;
45933 }
45934 @@ -2140,6 +2207,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45935 error = complete_walk(nd);
45936 if (error)
45937 return ERR_PTR(-ECHILD);
45938 +#ifdef CONFIG_GRKERNSEC
45939 + if (nd->flags & LOOKUP_RCU) {
45940 + error = -ECHILD;
45941 + goto exit;
45942 + }
45943 +#endif
45944 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45945 + error = -ENOENT;
45946 + goto exit;
45947 + }
45948
45949 error = -ENOTDIR;
45950 if (nd->flags & LOOKUP_DIRECTORY) {
45951 @@ -2180,6 +2257,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45952 /* Negative dentry, just create the file */
45953 if (!dentry->d_inode) {
45954 int mode = op->mode;
45955 +
45956 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
45957 + error = -EACCES;
45958 + goto exit_mutex_unlock;
45959 + }
45960 +
45961 if (!IS_POSIXACL(dir->d_inode))
45962 mode &= ~current_umask();
45963 /*
45964 @@ -2203,6 +2286,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45965 error = vfs_create(dir->d_inode, dentry, mode, nd);
45966 if (error)
45967 goto exit_mutex_unlock;
45968 + else
45969 + gr_handle_create(path->dentry, path->mnt);
45970 mutex_unlock(&dir->d_inode->i_mutex);
45971 dput(nd->path.dentry);
45972 nd->path.dentry = dentry;
45973 @@ -2212,6 +2297,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45974 /*
45975 * It already exists.
45976 */
45977 +
45978 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
45979 + error = -ENOENT;
45980 + goto exit_mutex_unlock;
45981 + }
45982 +
45983 + /* only check if O_CREAT is specified, all other checks need to go
45984 + into may_open */
45985 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
45986 + error = -EACCES;
45987 + goto exit_mutex_unlock;
45988 + }
45989 +
45990 mutex_unlock(&dir->d_inode->i_mutex);
45991 audit_inode(pathname, path->dentry);
45992
45993 @@ -2424,6 +2522,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
45994 *path = nd.path;
45995 return dentry;
45996 eexist:
45997 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
45998 + dput(dentry);
45999 + dentry = ERR_PTR(-ENOENT);
46000 + goto fail;
46001 + }
46002 dput(dentry);
46003 dentry = ERR_PTR(-EEXIST);
46004 fail:
46005 @@ -2446,6 +2549,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46006 }
46007 EXPORT_SYMBOL(user_path_create);
46008
46009 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46010 +{
46011 + char *tmp = getname(pathname);
46012 + struct dentry *res;
46013 + if (IS_ERR(tmp))
46014 + return ERR_CAST(tmp);
46015 + res = kern_path_create(dfd, tmp, path, is_dir);
46016 + if (IS_ERR(res))
46017 + putname(tmp);
46018 + else
46019 + *to = tmp;
46020 + return res;
46021 +}
46022 +
46023 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
46024 {
46025 int error = may_create(dir, dentry);
46026 @@ -2513,6 +2630,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46027 error = mnt_want_write(path.mnt);
46028 if (error)
46029 goto out_dput;
46030 +
46031 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46032 + error = -EPERM;
46033 + goto out_drop_write;
46034 + }
46035 +
46036 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46037 + error = -EACCES;
46038 + goto out_drop_write;
46039 + }
46040 +
46041 error = security_path_mknod(&path, dentry, mode, dev);
46042 if (error)
46043 goto out_drop_write;
46044 @@ -2530,6 +2658,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46045 }
46046 out_drop_write:
46047 mnt_drop_write(path.mnt);
46048 +
46049 + if (!error)
46050 + gr_handle_create(dentry, path.mnt);
46051 out_dput:
46052 dput(dentry);
46053 mutex_unlock(&path.dentry->d_inode->i_mutex);
46054 @@ -2579,12 +2710,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
46055 error = mnt_want_write(path.mnt);
46056 if (error)
46057 goto out_dput;
46058 +
46059 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46060 + error = -EACCES;
46061 + goto out_drop_write;
46062 + }
46063 +
46064 error = security_path_mkdir(&path, dentry, mode);
46065 if (error)
46066 goto out_drop_write;
46067 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46068 out_drop_write:
46069 mnt_drop_write(path.mnt);
46070 +
46071 + if (!error)
46072 + gr_handle_create(dentry, path.mnt);
46073 out_dput:
46074 dput(dentry);
46075 mutex_unlock(&path.dentry->d_inode->i_mutex);
46076 @@ -2664,6 +2804,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46077 char * name;
46078 struct dentry *dentry;
46079 struct nameidata nd;
46080 + ino_t saved_ino = 0;
46081 + dev_t saved_dev = 0;
46082
46083 error = user_path_parent(dfd, pathname, &nd, &name);
46084 if (error)
46085 @@ -2692,6 +2834,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46086 error = -ENOENT;
46087 goto exit3;
46088 }
46089 +
46090 + saved_ino = dentry->d_inode->i_ino;
46091 + saved_dev = gr_get_dev_from_dentry(dentry);
46092 +
46093 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46094 + error = -EACCES;
46095 + goto exit3;
46096 + }
46097 +
46098 error = mnt_want_write(nd.path.mnt);
46099 if (error)
46100 goto exit3;
46101 @@ -2699,6 +2850,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46102 if (error)
46103 goto exit4;
46104 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46105 + if (!error && (saved_dev || saved_ino))
46106 + gr_handle_delete(saved_ino, saved_dev);
46107 exit4:
46108 mnt_drop_write(nd.path.mnt);
46109 exit3:
46110 @@ -2761,6 +2914,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46111 struct dentry *dentry;
46112 struct nameidata nd;
46113 struct inode *inode = NULL;
46114 + ino_t saved_ino = 0;
46115 + dev_t saved_dev = 0;
46116
46117 error = user_path_parent(dfd, pathname, &nd, &name);
46118 if (error)
46119 @@ -2783,6 +2938,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46120 if (!inode)
46121 goto slashes;
46122 ihold(inode);
46123 +
46124 + if (inode->i_nlink <= 1) {
46125 + saved_ino = inode->i_ino;
46126 + saved_dev = gr_get_dev_from_dentry(dentry);
46127 + }
46128 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46129 + error = -EACCES;
46130 + goto exit2;
46131 + }
46132 +
46133 error = mnt_want_write(nd.path.mnt);
46134 if (error)
46135 goto exit2;
46136 @@ -2790,6 +2955,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46137 if (error)
46138 goto exit3;
46139 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46140 + if (!error && (saved_ino || saved_dev))
46141 + gr_handle_delete(saved_ino, saved_dev);
46142 exit3:
46143 mnt_drop_write(nd.path.mnt);
46144 exit2:
46145 @@ -2865,10 +3032,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46146 error = mnt_want_write(path.mnt);
46147 if (error)
46148 goto out_dput;
46149 +
46150 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46151 + error = -EACCES;
46152 + goto out_drop_write;
46153 + }
46154 +
46155 error = security_path_symlink(&path, dentry, from);
46156 if (error)
46157 goto out_drop_write;
46158 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46159 + if (!error)
46160 + gr_handle_create(dentry, path.mnt);
46161 out_drop_write:
46162 mnt_drop_write(path.mnt);
46163 out_dput:
46164 @@ -2940,6 +3115,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46165 {
46166 struct dentry *new_dentry;
46167 struct path old_path, new_path;
46168 + char *to = NULL;
46169 int how = 0;
46170 int error;
46171
46172 @@ -2963,7 +3139,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46173 if (error)
46174 return error;
46175
46176 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46177 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46178 error = PTR_ERR(new_dentry);
46179 if (IS_ERR(new_dentry))
46180 goto out;
46181 @@ -2974,13 +3150,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46182 error = mnt_want_write(new_path.mnt);
46183 if (error)
46184 goto out_dput;
46185 +
46186 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46187 + old_path.dentry->d_inode,
46188 + old_path.dentry->d_inode->i_mode, to)) {
46189 + error = -EACCES;
46190 + goto out_drop_write;
46191 + }
46192 +
46193 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46194 + old_path.dentry, old_path.mnt, to)) {
46195 + error = -EACCES;
46196 + goto out_drop_write;
46197 + }
46198 +
46199 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46200 if (error)
46201 goto out_drop_write;
46202 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46203 + if (!error)
46204 + gr_handle_create(new_dentry, new_path.mnt);
46205 out_drop_write:
46206 mnt_drop_write(new_path.mnt);
46207 out_dput:
46208 + putname(to);
46209 dput(new_dentry);
46210 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46211 path_put(&new_path);
46212 @@ -3208,6 +3401,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46213 if (new_dentry == trap)
46214 goto exit5;
46215
46216 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46217 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46218 + to);
46219 + if (error)
46220 + goto exit5;
46221 +
46222 error = mnt_want_write(oldnd.path.mnt);
46223 if (error)
46224 goto exit5;
46225 @@ -3217,6 +3416,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46226 goto exit6;
46227 error = vfs_rename(old_dir->d_inode, old_dentry,
46228 new_dir->d_inode, new_dentry);
46229 + if (!error)
46230 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46231 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46232 exit6:
46233 mnt_drop_write(oldnd.path.mnt);
46234 exit5:
46235 @@ -3242,6 +3444,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46236
46237 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46238 {
46239 + char tmpbuf[64];
46240 + const char *newlink;
46241 int len;
46242
46243 len = PTR_ERR(link);
46244 @@ -3251,7 +3455,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46245 len = strlen(link);
46246 if (len > (unsigned) buflen)
46247 len = buflen;
46248 - if (copy_to_user(buffer, link, len))
46249 +
46250 + if (len < sizeof(tmpbuf)) {
46251 + memcpy(tmpbuf, link, len);
46252 + newlink = tmpbuf;
46253 + } else
46254 + newlink = link;
46255 +
46256 + if (copy_to_user(buffer, newlink, len))
46257 len = -EFAULT;
46258 out:
46259 return len;
46260 diff --git a/fs/namespace.c b/fs/namespace.c
46261 index cfc6d44..b4632a5 100644
46262 --- a/fs/namespace.c
46263 +++ b/fs/namespace.c
46264 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
46265 if (!(sb->s_flags & MS_RDONLY))
46266 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46267 up_write(&sb->s_umount);
46268 +
46269 + gr_log_remount(mnt->mnt_devname, retval);
46270 +
46271 return retval;
46272 }
46273
46274 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
46275 br_write_unlock(vfsmount_lock);
46276 up_write(&namespace_sem);
46277 release_mounts(&umount_list);
46278 +
46279 + gr_log_unmount(mnt->mnt_devname, retval);
46280 +
46281 return retval;
46282 }
46283
46284 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46285 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46286 MS_STRICTATIME);
46287
46288 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46289 + retval = -EPERM;
46290 + goto dput_out;
46291 + }
46292 +
46293 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46294 + retval = -EPERM;
46295 + goto dput_out;
46296 + }
46297 +
46298 if (flags & MS_REMOUNT)
46299 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46300 data_page);
46301 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46302 dev_name, data_page);
46303 dput_out:
46304 path_put(&path);
46305 +
46306 + gr_log_mount(dev_name, dir_name, retval);
46307 +
46308 return retval;
46309 }
46310
46311 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46312 if (error)
46313 goto out2;
46314
46315 + if (gr_handle_chroot_pivot()) {
46316 + error = -EPERM;
46317 + goto out2;
46318 + }
46319 +
46320 get_fs_root(current->fs, &root);
46321 error = lock_mount(&old);
46322 if (error)
46323 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
46324 index 3db6b82..a57597e 100644
46325 --- a/fs/nfs/blocklayout/blocklayout.c
46326 +++ b/fs/nfs/blocklayout/blocklayout.c
46327 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
46328 */
46329 struct parallel_io {
46330 struct kref refcnt;
46331 - struct rpc_call_ops call_ops;
46332 + rpc_call_ops_no_const call_ops;
46333 void (*pnfs_callback) (void *data);
46334 void *data;
46335 };
46336 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46337 index 50a15fa..ca113f9 100644
46338 --- a/fs/nfs/inode.c
46339 +++ b/fs/nfs/inode.c
46340 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46341 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46342 nfsi->attrtimeo_timestamp = jiffies;
46343
46344 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46345 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46346 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46347 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46348 else
46349 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46350 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46351 }
46352
46353 -static atomic_long_t nfs_attr_generation_counter;
46354 +static atomic_long_unchecked_t nfs_attr_generation_counter;
46355
46356 static unsigned long nfs_read_attr_generation_counter(void)
46357 {
46358 - return atomic_long_read(&nfs_attr_generation_counter);
46359 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46360 }
46361
46362 unsigned long nfs_inc_attr_generation_counter(void)
46363 {
46364 - return atomic_long_inc_return(&nfs_attr_generation_counter);
46365 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46366 }
46367
46368 void nfs_fattr_init(struct nfs_fattr *fattr)
46369 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46370 index 7a2e442..8e544cc 100644
46371 --- a/fs/nfsd/vfs.c
46372 +++ b/fs/nfsd/vfs.c
46373 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46374 } else {
46375 oldfs = get_fs();
46376 set_fs(KERNEL_DS);
46377 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46378 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46379 set_fs(oldfs);
46380 }
46381
46382 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46383
46384 /* Write the data. */
46385 oldfs = get_fs(); set_fs(KERNEL_DS);
46386 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46387 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46388 set_fs(oldfs);
46389 if (host_err < 0)
46390 goto out_nfserr;
46391 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46392 */
46393
46394 oldfs = get_fs(); set_fs(KERNEL_DS);
46395 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
46396 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
46397 set_fs(oldfs);
46398
46399 if (host_err < 0)
46400 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46401 index 9fde1c0..14e8827 100644
46402 --- a/fs/notify/fanotify/fanotify_user.c
46403 +++ b/fs/notify/fanotify/fanotify_user.c
46404 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46405 goto out_close_fd;
46406
46407 ret = -EFAULT;
46408 - if (copy_to_user(buf, &fanotify_event_metadata,
46409 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46410 + copy_to_user(buf, &fanotify_event_metadata,
46411 fanotify_event_metadata.event_len))
46412 goto out_kill_access_response;
46413
46414 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46415 index ee18815..7aa5d01 100644
46416 --- a/fs/notify/notification.c
46417 +++ b/fs/notify/notification.c
46418 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46419 * get set to 0 so it will never get 'freed'
46420 */
46421 static struct fsnotify_event *q_overflow_event;
46422 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46423 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46424
46425 /**
46426 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46427 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46428 */
46429 u32 fsnotify_get_cookie(void)
46430 {
46431 - return atomic_inc_return(&fsnotify_sync_cookie);
46432 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46433 }
46434 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46435
46436 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46437 index 99e3610..02c1068 100644
46438 --- a/fs/ntfs/dir.c
46439 +++ b/fs/ntfs/dir.c
46440 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
46441 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46442 ~(s64)(ndir->itype.index.block_size - 1)));
46443 /* Bounds checks. */
46444 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46445 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46446 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46447 "inode 0x%lx or driver bug.", vdir->i_ino);
46448 goto err_out;
46449 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46450 index c587e2d..3641eaa 100644
46451 --- a/fs/ntfs/file.c
46452 +++ b/fs/ntfs/file.c
46453 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46454 #endif /* NTFS_RW */
46455 };
46456
46457 -const struct file_operations ntfs_empty_file_ops = {};
46458 +const struct file_operations ntfs_empty_file_ops __read_only;
46459
46460 -const struct inode_operations ntfs_empty_inode_ops = {};
46461 +const struct inode_operations ntfs_empty_inode_ops __read_only;
46462 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46463 index 210c352..a174f83 100644
46464 --- a/fs/ocfs2/localalloc.c
46465 +++ b/fs/ocfs2/localalloc.c
46466 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46467 goto bail;
46468 }
46469
46470 - atomic_inc(&osb->alloc_stats.moves);
46471 + atomic_inc_unchecked(&osb->alloc_stats.moves);
46472
46473 bail:
46474 if (handle)
46475 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46476 index d355e6e..578d905 100644
46477 --- a/fs/ocfs2/ocfs2.h
46478 +++ b/fs/ocfs2/ocfs2.h
46479 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
46480
46481 struct ocfs2_alloc_stats
46482 {
46483 - atomic_t moves;
46484 - atomic_t local_data;
46485 - atomic_t bitmap_data;
46486 - atomic_t bg_allocs;
46487 - atomic_t bg_extends;
46488 + atomic_unchecked_t moves;
46489 + atomic_unchecked_t local_data;
46490 + atomic_unchecked_t bitmap_data;
46491 + atomic_unchecked_t bg_allocs;
46492 + atomic_unchecked_t bg_extends;
46493 };
46494
46495 enum ocfs2_local_alloc_state
46496 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46497 index ba5d97e..c77db25 100644
46498 --- a/fs/ocfs2/suballoc.c
46499 +++ b/fs/ocfs2/suballoc.c
46500 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46501 mlog_errno(status);
46502 goto bail;
46503 }
46504 - atomic_inc(&osb->alloc_stats.bg_extends);
46505 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46506
46507 /* You should never ask for this much metadata */
46508 BUG_ON(bits_wanted >
46509 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46510 mlog_errno(status);
46511 goto bail;
46512 }
46513 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46514 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46515
46516 *suballoc_loc = res.sr_bg_blkno;
46517 *suballoc_bit_start = res.sr_bit_offset;
46518 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46519 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46520 res->sr_bits);
46521
46522 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46523 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46524
46525 BUG_ON(res->sr_bits != 1);
46526
46527 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46528 mlog_errno(status);
46529 goto bail;
46530 }
46531 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46532 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46533
46534 BUG_ON(res.sr_bits != 1);
46535
46536 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46537 cluster_start,
46538 num_clusters);
46539 if (!status)
46540 - atomic_inc(&osb->alloc_stats.local_data);
46541 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
46542 } else {
46543 if (min_clusters > (osb->bitmap_cpg - 1)) {
46544 /* The only paths asking for contiguousness
46545 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46546 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46547 res.sr_bg_blkno,
46548 res.sr_bit_offset);
46549 - atomic_inc(&osb->alloc_stats.bitmap_data);
46550 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46551 *num_clusters = res.sr_bits;
46552 }
46553 }
46554 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46555 index 4994f8b..eaab8eb 100644
46556 --- a/fs/ocfs2/super.c
46557 +++ b/fs/ocfs2/super.c
46558 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46559 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46560 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46561 "Stats",
46562 - atomic_read(&osb->alloc_stats.bitmap_data),
46563 - atomic_read(&osb->alloc_stats.local_data),
46564 - atomic_read(&osb->alloc_stats.bg_allocs),
46565 - atomic_read(&osb->alloc_stats.moves),
46566 - atomic_read(&osb->alloc_stats.bg_extends));
46567 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46568 + atomic_read_unchecked(&osb->alloc_stats.local_data),
46569 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46570 + atomic_read_unchecked(&osb->alloc_stats.moves),
46571 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46572
46573 out += snprintf(buf + out, len - out,
46574 "%10s => State: %u Descriptor: %llu Size: %u bits "
46575 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46576 spin_lock_init(&osb->osb_xattr_lock);
46577 ocfs2_init_steal_slots(osb);
46578
46579 - atomic_set(&osb->alloc_stats.moves, 0);
46580 - atomic_set(&osb->alloc_stats.local_data, 0);
46581 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
46582 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
46583 - atomic_set(&osb->alloc_stats.bg_extends, 0);
46584 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46585 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46586 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46587 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46588 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46589
46590 /* Copy the blockcheck stats from the superblock probe */
46591 osb->osb_ecc_stats = *stats;
46592 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46593 index 5d22872..523db20 100644
46594 --- a/fs/ocfs2/symlink.c
46595 +++ b/fs/ocfs2/symlink.c
46596 @@ -142,7 +142,7 @@ bail:
46597
46598 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46599 {
46600 - char *link = nd_get_link(nd);
46601 + const char *link = nd_get_link(nd);
46602 if (!IS_ERR(link))
46603 kfree(link);
46604 }
46605 diff --git a/fs/open.c b/fs/open.c
46606 index 22c41b5..78894cf 100644
46607 --- a/fs/open.c
46608 +++ b/fs/open.c
46609 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46610 error = locks_verify_truncate(inode, NULL, length);
46611 if (!error)
46612 error = security_path_truncate(&path);
46613 +
46614 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46615 + error = -EACCES;
46616 +
46617 if (!error)
46618 error = do_truncate(path.dentry, length, 0, NULL);
46619
46620 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46621 if (__mnt_is_readonly(path.mnt))
46622 res = -EROFS;
46623
46624 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46625 + res = -EACCES;
46626 +
46627 out_path_release:
46628 path_put(&path);
46629 out:
46630 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46631 if (error)
46632 goto dput_and_out;
46633
46634 + gr_log_chdir(path.dentry, path.mnt);
46635 +
46636 set_fs_pwd(current->fs, &path);
46637
46638 dput_and_out:
46639 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46640 goto out_putf;
46641
46642 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46643 +
46644 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46645 + error = -EPERM;
46646 +
46647 + if (!error)
46648 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46649 +
46650 if (!error)
46651 set_fs_pwd(current->fs, &file->f_path);
46652 out_putf:
46653 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
46654 if (error)
46655 goto dput_and_out;
46656
46657 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46658 + goto dput_and_out;
46659 +
46660 set_fs_root(current->fs, &path);
46661 +
46662 + gr_handle_chroot_chdir(&path);
46663 +
46664 error = 0;
46665 dput_and_out:
46666 path_put(&path);
46667 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
46668 if (error)
46669 return error;
46670 mutex_lock(&inode->i_mutex);
46671 +
46672 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
46673 + error = -EACCES;
46674 + goto out_unlock;
46675 + }
46676 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46677 + error = -EACCES;
46678 + goto out_unlock;
46679 + }
46680 +
46681 error = security_path_chmod(path->dentry, path->mnt, mode);
46682 if (error)
46683 goto out_unlock;
46684 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
46685 int error;
46686 struct iattr newattrs;
46687
46688 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
46689 + return -EACCES;
46690 +
46691 newattrs.ia_valid = ATTR_CTIME;
46692 if (user != (uid_t) -1) {
46693 newattrs.ia_valid |= ATTR_UID;
46694 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
46695 index 6296b40..417c00f 100644
46696 --- a/fs/partitions/efi.c
46697 +++ b/fs/partitions/efi.c
46698 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
46699 if (!gpt)
46700 return NULL;
46701
46702 + if (!le32_to_cpu(gpt->num_partition_entries))
46703 + return NULL;
46704 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
46705 + if (!pte)
46706 + return NULL;
46707 +
46708 count = le32_to_cpu(gpt->num_partition_entries) *
46709 le32_to_cpu(gpt->sizeof_partition_entry);
46710 - if (!count)
46711 - return NULL;
46712 - pte = kzalloc(count, GFP_KERNEL);
46713 - if (!pte)
46714 - return NULL;
46715 -
46716 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
46717 (u8 *) pte,
46718 count) < count) {
46719 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
46720 index bd8ae78..539d250 100644
46721 --- a/fs/partitions/ldm.c
46722 +++ b/fs/partitions/ldm.c
46723 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
46724 goto found;
46725 }
46726
46727 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
46728 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
46729 if (!f) {
46730 ldm_crit ("Out of memory.");
46731 return false;
46732 diff --git a/fs/pipe.c b/fs/pipe.c
46733 index 4065f07..68c0706 100644
46734 --- a/fs/pipe.c
46735 +++ b/fs/pipe.c
46736 @@ -420,9 +420,9 @@ redo:
46737 }
46738 if (bufs) /* More to do? */
46739 continue;
46740 - if (!pipe->writers)
46741 + if (!atomic_read(&pipe->writers))
46742 break;
46743 - if (!pipe->waiting_writers) {
46744 + if (!atomic_read(&pipe->waiting_writers)) {
46745 /* syscall merging: Usually we must not sleep
46746 * if O_NONBLOCK is set, or if we got some data.
46747 * But if a writer sleeps in kernel space, then
46748 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
46749 mutex_lock(&inode->i_mutex);
46750 pipe = inode->i_pipe;
46751
46752 - if (!pipe->readers) {
46753 + if (!atomic_read(&pipe->readers)) {
46754 send_sig(SIGPIPE, current, 0);
46755 ret = -EPIPE;
46756 goto out;
46757 @@ -530,7 +530,7 @@ redo1:
46758 for (;;) {
46759 int bufs;
46760
46761 - if (!pipe->readers) {
46762 + if (!atomic_read(&pipe->readers)) {
46763 send_sig(SIGPIPE, current, 0);
46764 if (!ret)
46765 ret = -EPIPE;
46766 @@ -616,9 +616,9 @@ redo2:
46767 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46768 do_wakeup = 0;
46769 }
46770 - pipe->waiting_writers++;
46771 + atomic_inc(&pipe->waiting_writers);
46772 pipe_wait(pipe);
46773 - pipe->waiting_writers--;
46774 + atomic_dec(&pipe->waiting_writers);
46775 }
46776 out:
46777 mutex_unlock(&inode->i_mutex);
46778 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46779 mask = 0;
46780 if (filp->f_mode & FMODE_READ) {
46781 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
46782 - if (!pipe->writers && filp->f_version != pipe->w_counter)
46783 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
46784 mask |= POLLHUP;
46785 }
46786
46787 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46788 * Most Unices do not set POLLERR for FIFOs but on Linux they
46789 * behave exactly like pipes for poll().
46790 */
46791 - if (!pipe->readers)
46792 + if (!atomic_read(&pipe->readers))
46793 mask |= POLLERR;
46794 }
46795
46796 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
46797
46798 mutex_lock(&inode->i_mutex);
46799 pipe = inode->i_pipe;
46800 - pipe->readers -= decr;
46801 - pipe->writers -= decw;
46802 + atomic_sub(decr, &pipe->readers);
46803 + atomic_sub(decw, &pipe->writers);
46804
46805 - if (!pipe->readers && !pipe->writers) {
46806 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
46807 free_pipe_info(inode);
46808 } else {
46809 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
46810 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
46811
46812 if (inode->i_pipe) {
46813 ret = 0;
46814 - inode->i_pipe->readers++;
46815 + atomic_inc(&inode->i_pipe->readers);
46816 }
46817
46818 mutex_unlock(&inode->i_mutex);
46819 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
46820
46821 if (inode->i_pipe) {
46822 ret = 0;
46823 - inode->i_pipe->writers++;
46824 + atomic_inc(&inode->i_pipe->writers);
46825 }
46826
46827 mutex_unlock(&inode->i_mutex);
46828 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
46829 if (inode->i_pipe) {
46830 ret = 0;
46831 if (filp->f_mode & FMODE_READ)
46832 - inode->i_pipe->readers++;
46833 + atomic_inc(&inode->i_pipe->readers);
46834 if (filp->f_mode & FMODE_WRITE)
46835 - inode->i_pipe->writers++;
46836 + atomic_inc(&inode->i_pipe->writers);
46837 }
46838
46839 mutex_unlock(&inode->i_mutex);
46840 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
46841 inode->i_pipe = NULL;
46842 }
46843
46844 -static struct vfsmount *pipe_mnt __read_mostly;
46845 +struct vfsmount *pipe_mnt __read_mostly;
46846
46847 /*
46848 * pipefs_dname() is called from d_path().
46849 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
46850 goto fail_iput;
46851 inode->i_pipe = pipe;
46852
46853 - pipe->readers = pipe->writers = 1;
46854 + atomic_set(&pipe->readers, 1);
46855 + atomic_set(&pipe->writers, 1);
46856 inode->i_fop = &rdwr_pipefifo_fops;
46857
46858 /*
46859 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
46860 index 15af622..0e9f4467 100644
46861 --- a/fs/proc/Kconfig
46862 +++ b/fs/proc/Kconfig
46863 @@ -30,12 +30,12 @@ config PROC_FS
46864
46865 config PROC_KCORE
46866 bool "/proc/kcore support" if !ARM
46867 - depends on PROC_FS && MMU
46868 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46869
46870 config PROC_VMCORE
46871 bool "/proc/vmcore support"
46872 - depends on PROC_FS && CRASH_DUMP
46873 - default y
46874 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46875 + default n
46876 help
46877 Exports the dump image of crashed kernel in ELF format.
46878
46879 @@ -59,8 +59,8 @@ config PROC_SYSCTL
46880 limited in memory.
46881
46882 config PROC_PAGE_MONITOR
46883 - default y
46884 - depends on PROC_FS && MMU
46885 + default n
46886 + depends on PROC_FS && MMU && !GRKERNSEC
46887 bool "Enable /proc page monitoring" if EXPERT
46888 help
46889 Various /proc files exist to monitor process memory utilization:
46890 diff --git a/fs/proc/array.c b/fs/proc/array.c
46891 index 3a1dafd..bf1bd84 100644
46892 --- a/fs/proc/array.c
46893 +++ b/fs/proc/array.c
46894 @@ -60,6 +60,7 @@
46895 #include <linux/tty.h>
46896 #include <linux/string.h>
46897 #include <linux/mman.h>
46898 +#include <linux/grsecurity.h>
46899 #include <linux/proc_fs.h>
46900 #include <linux/ioport.h>
46901 #include <linux/uaccess.h>
46902 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
46903 seq_putc(m, '\n');
46904 }
46905
46906 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46907 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
46908 +{
46909 + if (p->mm)
46910 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
46911 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
46912 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
46913 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
46914 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
46915 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
46916 + else
46917 + seq_printf(m, "PaX:\t-----\n");
46918 +}
46919 +#endif
46920 +
46921 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46922 struct pid *pid, struct task_struct *task)
46923 {
46924 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46925 task_cpus_allowed(m, task);
46926 cpuset_task_status_allowed(m, task);
46927 task_context_switch_counts(m, task);
46928 +
46929 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46930 + task_pax(m, task);
46931 +#endif
46932 +
46933 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
46934 + task_grsec_rbac(m, task);
46935 +#endif
46936 +
46937 return 0;
46938 }
46939
46940 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46941 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46942 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
46943 + _mm->pax_flags & MF_PAX_SEGMEXEC))
46944 +#endif
46945 +
46946 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46947 struct pid *pid, struct task_struct *task, int whole)
46948 {
46949 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46950 char tcomm[sizeof(task->comm)];
46951 unsigned long flags;
46952
46953 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46954 + if (current->exec_id != m->exec_id) {
46955 + gr_log_badprocpid("stat");
46956 + return 0;
46957 + }
46958 +#endif
46959 +
46960 state = *get_task_state(task);
46961 vsize = eip = esp = 0;
46962 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
46963 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46964 gtime = task->gtime;
46965 }
46966
46967 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46968 + if (PAX_RAND_FLAGS(mm)) {
46969 + eip = 0;
46970 + esp = 0;
46971 + wchan = 0;
46972 + }
46973 +#endif
46974 +#ifdef CONFIG_GRKERNSEC_HIDESYM
46975 + wchan = 0;
46976 + eip =0;
46977 + esp =0;
46978 +#endif
46979 +
46980 /* scale priority and nice values from timeslices to -20..20 */
46981 /* to make it look like a "normal" Unix priority/nice value */
46982 priority = task_prio(task);
46983 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
46984 vsize,
46985 mm ? get_mm_rss(mm) : 0,
46986 rsslim,
46987 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46988 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
46989 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
46990 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
46991 +#else
46992 mm ? (permitted ? mm->start_code : 1) : 0,
46993 mm ? (permitted ? mm->end_code : 1) : 0,
46994 (permitted && mm) ? mm->start_stack : 0,
46995 +#endif
46996 esp,
46997 eip,
46998 /* The signal information here is obsolete.
46999 @@ -533,8 +590,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47000 struct pid *pid, struct task_struct *task)
47001 {
47002 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47003 - struct mm_struct *mm = get_task_mm(task);
47004 + struct mm_struct *mm;
47005
47006 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47007 + if (current->exec_id != m->exec_id) {
47008 + gr_log_badprocpid("statm");
47009 + return 0;
47010 + }
47011 +#endif
47012 + mm = get_task_mm(task);
47013 if (mm) {
47014 size = task_statm(mm, &shared, &text, &data, &resident);
47015 mmput(mm);
47016 @@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47017
47018 return 0;
47019 }
47020 +
47021 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47022 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47023 +{
47024 + u32 curr_ip = 0;
47025 + unsigned long flags;
47026 +
47027 + if (lock_task_sighand(task, &flags)) {
47028 + curr_ip = task->signal->curr_ip;
47029 + unlock_task_sighand(task, &flags);
47030 + }
47031 +
47032 + return sprintf(buffer, "%pI4\n", &curr_ip);
47033 +}
47034 +#endif
47035 diff --git a/fs/proc/base.c b/fs/proc/base.c
47036 index 1ace83d..f5e575d 100644
47037 --- a/fs/proc/base.c
47038 +++ b/fs/proc/base.c
47039 @@ -107,6 +107,22 @@ struct pid_entry {
47040 union proc_op op;
47041 };
47042
47043 +struct getdents_callback {
47044 + struct linux_dirent __user * current_dir;
47045 + struct linux_dirent __user * previous;
47046 + struct file * file;
47047 + int count;
47048 + int error;
47049 +};
47050 +
47051 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
47052 + loff_t offset, u64 ino, unsigned int d_type)
47053 +{
47054 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
47055 + buf->error = -EINVAL;
47056 + return 0;
47057 +}
47058 +
47059 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47060 .name = (NAME), \
47061 .len = sizeof(NAME) - 1, \
47062 @@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
47063 return result;
47064 }
47065
47066 -static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
47067 -{
47068 - struct mm_struct *mm;
47069 - int err;
47070 -
47071 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
47072 - if (err)
47073 - return ERR_PTR(err);
47074 -
47075 - mm = get_task_mm(task);
47076 - if (mm && mm != current->mm &&
47077 - !ptrace_may_access(task, mode)) {
47078 - mmput(mm);
47079 - mm = ERR_PTR(-EACCES);
47080 - }
47081 - mutex_unlock(&task->signal->cred_guard_mutex);
47082 -
47083 - return mm;
47084 -}
47085 -
47086 struct mm_struct *mm_for_maps(struct task_struct *task)
47087 {
47088 return mm_access(task, PTRACE_MODE_READ);
47089 @@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47090 if (!mm->arg_end)
47091 goto out_mm; /* Shh! No looking before we're done */
47092
47093 + if (gr_acl_handle_procpidmem(task))
47094 + goto out_mm;
47095 +
47096 len = mm->arg_end - mm->arg_start;
47097
47098 if (len > PAGE_SIZE)
47099 @@ -256,12 +255,28 @@ out:
47100 return res;
47101 }
47102
47103 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47104 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47105 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47106 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47107 +#endif
47108 +
47109 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47110 {
47111 struct mm_struct *mm = mm_for_maps(task);
47112 int res = PTR_ERR(mm);
47113 if (mm && !IS_ERR(mm)) {
47114 unsigned int nwords = 0;
47115 +
47116 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47117 + /* allow if we're currently ptracing this task */
47118 + if (PAX_RAND_FLAGS(mm) &&
47119 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47120 + mmput(mm);
47121 + return 0;
47122 + }
47123 +#endif
47124 +
47125 do {
47126 nwords += 2;
47127 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47128 @@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47129 }
47130
47131
47132 -#ifdef CONFIG_KALLSYMS
47133 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47134 /*
47135 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47136 * Returns the resolved symbol. If that fails, simply return the address.
47137 @@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
47138 mutex_unlock(&task->signal->cred_guard_mutex);
47139 }
47140
47141 -#ifdef CONFIG_STACKTRACE
47142 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47143
47144 #define MAX_STACK_TRACE_DEPTH 64
47145
47146 @@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47147 return count;
47148 }
47149
47150 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47151 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47152 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47153 {
47154 long nr;
47155 @@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47156 /************************************************************************/
47157
47158 /* permission checks */
47159 -static int proc_fd_access_allowed(struct inode *inode)
47160 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47161 {
47162 struct task_struct *task;
47163 int allowed = 0;
47164 @@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47165 */
47166 task = get_proc_task(inode);
47167 if (task) {
47168 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47169 + if (log)
47170 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
47171 + else
47172 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47173 put_task_struct(task);
47174 }
47175 return allowed;
47176 @@ -786,6 +804,10 @@ static int mem_open(struct inode* inode, struct file* file)
47177 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47178 file->private_data = mm;
47179
47180 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47181 + file->f_version = current->exec_id;
47182 +#endif
47183 +
47184 return 0;
47185 }
47186
47187 @@ -797,6 +819,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47188 ssize_t copied;
47189 char *page;
47190
47191 +#ifdef CONFIG_GRKERNSEC
47192 + if (write)
47193 + return -EPERM;
47194 +#endif
47195 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47196 + if (file->f_version != current->exec_id) {
47197 + gr_log_badprocpid("mem");
47198 + return 0;
47199 + }
47200 +#endif
47201 +
47202 if (!mm)
47203 return 0;
47204
47205 @@ -897,6 +930,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47206 if (!task)
47207 goto out_no_task;
47208
47209 + if (gr_acl_handle_procpidmem(task))
47210 + goto out;
47211 +
47212 ret = -ENOMEM;
47213 page = (char *)__get_free_page(GFP_TEMPORARY);
47214 if (!page)
47215 @@ -1519,7 +1555,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47216 path_put(&nd->path);
47217
47218 /* Are we allowed to snoop on the tasks file descriptors? */
47219 - if (!proc_fd_access_allowed(inode))
47220 + if (!proc_fd_access_allowed(inode,0))
47221 goto out;
47222
47223 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
47224 @@ -1558,8 +1594,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47225 struct path path;
47226
47227 /* Are we allowed to snoop on the tasks file descriptors? */
47228 - if (!proc_fd_access_allowed(inode))
47229 - goto out;
47230 + /* logging this is needed for learning on chromium to work properly,
47231 + but we don't want to flood the logs from 'ps' which does a readlink
47232 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47233 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
47234 + */
47235 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47236 + if (!proc_fd_access_allowed(inode,0))
47237 + goto out;
47238 + } else {
47239 + if (!proc_fd_access_allowed(inode,1))
47240 + goto out;
47241 + }
47242
47243 error = PROC_I(inode)->op.proc_get_link(inode, &path);
47244 if (error)
47245 @@ -1624,7 +1670,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47246 rcu_read_lock();
47247 cred = __task_cred(task);
47248 inode->i_uid = cred->euid;
47249 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47250 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47251 +#else
47252 inode->i_gid = cred->egid;
47253 +#endif
47254 rcu_read_unlock();
47255 }
47256 security_task_to_inode(task, inode);
47257 @@ -1642,6 +1692,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47258 struct inode *inode = dentry->d_inode;
47259 struct task_struct *task;
47260 const struct cred *cred;
47261 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47262 + const struct cred *tmpcred = current_cred();
47263 +#endif
47264
47265 generic_fillattr(inode, stat);
47266
47267 @@ -1649,13 +1702,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47268 stat->uid = 0;
47269 stat->gid = 0;
47270 task = pid_task(proc_pid(inode), PIDTYPE_PID);
47271 +
47272 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
47273 + rcu_read_unlock();
47274 + return -ENOENT;
47275 + }
47276 +
47277 if (task) {
47278 + cred = __task_cred(task);
47279 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47280 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47281 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47282 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47283 +#endif
47284 + ) {
47285 +#endif
47286 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47287 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47288 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47289 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47290 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47291 +#endif
47292 task_dumpable(task)) {
47293 - cred = __task_cred(task);
47294 stat->uid = cred->euid;
47295 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47296 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47297 +#else
47298 stat->gid = cred->egid;
47299 +#endif
47300 }
47301 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47302 + } else {
47303 + rcu_read_unlock();
47304 + return -ENOENT;
47305 + }
47306 +#endif
47307 }
47308 rcu_read_unlock();
47309 return 0;
47310 @@ -1692,11 +1773,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47311
47312 if (task) {
47313 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47314 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47315 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47316 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47317 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47318 +#endif
47319 task_dumpable(task)) {
47320 rcu_read_lock();
47321 cred = __task_cred(task);
47322 inode->i_uid = cred->euid;
47323 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47324 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47325 +#else
47326 inode->i_gid = cred->egid;
47327 +#endif
47328 rcu_read_unlock();
47329 } else {
47330 inode->i_uid = 0;
47331 @@ -1814,7 +1904,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47332 int fd = proc_fd(inode);
47333
47334 if (task) {
47335 - files = get_files_struct(task);
47336 + if (!gr_acl_handle_procpidmem(task))
47337 + files = get_files_struct(task);
47338 put_task_struct(task);
47339 }
47340 if (files) {
47341 @@ -2082,11 +2173,21 @@ static const struct file_operations proc_fd_operations = {
47342 */
47343 static int proc_fd_permission(struct inode *inode, int mask)
47344 {
47345 + struct task_struct *task;
47346 int rv = generic_permission(inode, mask);
47347 - if (rv == 0)
47348 - return 0;
47349 +
47350 if (task_pid(current) == proc_pid(inode))
47351 rv = 0;
47352 +
47353 + task = get_proc_task(inode);
47354 + if (task == NULL)
47355 + return rv;
47356 +
47357 + if (gr_acl_handle_procpidmem(task))
47358 + rv = -EACCES;
47359 +
47360 + put_task_struct(task);
47361 +
47362 return rv;
47363 }
47364
47365 @@ -2196,6 +2297,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47366 if (!task)
47367 goto out_no_task;
47368
47369 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47370 + goto out;
47371 +
47372 /*
47373 * Yes, it does not scale. And it should not. Don't add
47374 * new entries into /proc/<tgid>/ without very good reasons.
47375 @@ -2240,6 +2344,9 @@ static int proc_pident_readdir(struct file *filp,
47376 if (!task)
47377 goto out_no_task;
47378
47379 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47380 + goto out;
47381 +
47382 ret = 0;
47383 i = filp->f_pos;
47384 switch (i) {
47385 @@ -2510,7 +2617,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47386 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47387 void *cookie)
47388 {
47389 - char *s = nd_get_link(nd);
47390 + const char *s = nd_get_link(nd);
47391 if (!IS_ERR(s))
47392 __putname(s);
47393 }
47394 @@ -2708,7 +2815,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47395 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47396 #endif
47397 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47398 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47399 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47400 INF("syscall", S_IRUGO, proc_pid_syscall),
47401 #endif
47402 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47403 @@ -2733,10 +2840,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47404 #ifdef CONFIG_SECURITY
47405 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47406 #endif
47407 -#ifdef CONFIG_KALLSYMS
47408 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47409 INF("wchan", S_IRUGO, proc_pid_wchan),
47410 #endif
47411 -#ifdef CONFIG_STACKTRACE
47412 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47413 ONE("stack", S_IRUGO, proc_pid_stack),
47414 #endif
47415 #ifdef CONFIG_SCHEDSTATS
47416 @@ -2770,6 +2877,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47417 #ifdef CONFIG_HARDWALL
47418 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47419 #endif
47420 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47421 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47422 +#endif
47423 };
47424
47425 static int proc_tgid_base_readdir(struct file * filp,
47426 @@ -2895,7 +3005,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47427 if (!inode)
47428 goto out;
47429
47430 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47431 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47432 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47433 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47434 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47435 +#else
47436 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47437 +#endif
47438 inode->i_op = &proc_tgid_base_inode_operations;
47439 inode->i_fop = &proc_tgid_base_operations;
47440 inode->i_flags|=S_IMMUTABLE;
47441 @@ -2937,7 +3054,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47442 if (!task)
47443 goto out;
47444
47445 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47446 + goto out_put_task;
47447 +
47448 result = proc_pid_instantiate(dir, dentry, task, NULL);
47449 +out_put_task:
47450 put_task_struct(task);
47451 out:
47452 return result;
47453 @@ -3002,6 +3123,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
47454 {
47455 unsigned int nr;
47456 struct task_struct *reaper;
47457 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47458 + const struct cred *tmpcred = current_cred();
47459 + const struct cred *itercred;
47460 +#endif
47461 + filldir_t __filldir = filldir;
47462 struct tgid_iter iter;
47463 struct pid_namespace *ns;
47464
47465 @@ -3025,8 +3151,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
47466 for (iter = next_tgid(ns, iter);
47467 iter.task;
47468 iter.tgid += 1, iter = next_tgid(ns, iter)) {
47469 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47470 + rcu_read_lock();
47471 + itercred = __task_cred(iter.task);
47472 +#endif
47473 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
47474 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47475 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
47476 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47477 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47478 +#endif
47479 + )
47480 +#endif
47481 + )
47482 + __filldir = &gr_fake_filldir;
47483 + else
47484 + __filldir = filldir;
47485 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47486 + rcu_read_unlock();
47487 +#endif
47488 filp->f_pos = iter.tgid + TGID_OFFSET;
47489 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
47490 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
47491 put_task_struct(iter.task);
47492 goto out;
47493 }
47494 @@ -3054,7 +3199,7 @@ static const struct pid_entry tid_base_stuff[] = {
47495 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47496 #endif
47497 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47498 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47499 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47500 INF("syscall", S_IRUGO, proc_pid_syscall),
47501 #endif
47502 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47503 @@ -3078,10 +3223,10 @@ static const struct pid_entry tid_base_stuff[] = {
47504 #ifdef CONFIG_SECURITY
47505 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47506 #endif
47507 -#ifdef CONFIG_KALLSYMS
47508 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47509 INF("wchan", S_IRUGO, proc_pid_wchan),
47510 #endif
47511 -#ifdef CONFIG_STACKTRACE
47512 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47513 ONE("stack", S_IRUGO, proc_pid_stack),
47514 #endif
47515 #ifdef CONFIG_SCHEDSTATS
47516 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47517 index 82676e3..5f8518a 100644
47518 --- a/fs/proc/cmdline.c
47519 +++ b/fs/proc/cmdline.c
47520 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47521
47522 static int __init proc_cmdline_init(void)
47523 {
47524 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47525 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47526 +#else
47527 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47528 +#endif
47529 return 0;
47530 }
47531 module_init(proc_cmdline_init);
47532 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47533 index b143471..bb105e5 100644
47534 --- a/fs/proc/devices.c
47535 +++ b/fs/proc/devices.c
47536 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47537
47538 static int __init proc_devices_init(void)
47539 {
47540 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47541 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47542 +#else
47543 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47544 +#endif
47545 return 0;
47546 }
47547 module_init(proc_devices_init);
47548 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47549 index 7737c54..7172574 100644
47550 --- a/fs/proc/inode.c
47551 +++ b/fs/proc/inode.c
47552 @@ -18,12 +18,18 @@
47553 #include <linux/module.h>
47554 #include <linux/sysctl.h>
47555 #include <linux/slab.h>
47556 +#include <linux/grsecurity.h>
47557
47558 #include <asm/system.h>
47559 #include <asm/uaccess.h>
47560
47561 #include "internal.h"
47562
47563 +#ifdef CONFIG_PROC_SYSCTL
47564 +extern const struct inode_operations proc_sys_inode_operations;
47565 +extern const struct inode_operations proc_sys_dir_operations;
47566 +#endif
47567 +
47568 static void proc_evict_inode(struct inode *inode)
47569 {
47570 struct proc_dir_entry *de;
47571 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
47572 ns_ops = PROC_I(inode)->ns_ops;
47573 if (ns_ops && ns_ops->put)
47574 ns_ops->put(PROC_I(inode)->ns);
47575 +
47576 +#ifdef CONFIG_PROC_SYSCTL
47577 + if (inode->i_op == &proc_sys_inode_operations ||
47578 + inode->i_op == &proc_sys_dir_operations)
47579 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47580 +#endif
47581 +
47582 }
47583
47584 static struct kmem_cache * proc_inode_cachep;
47585 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47586 if (de->mode) {
47587 inode->i_mode = de->mode;
47588 inode->i_uid = de->uid;
47589 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47590 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47591 +#else
47592 inode->i_gid = de->gid;
47593 +#endif
47594 }
47595 if (de->size)
47596 inode->i_size = de->size;
47597 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47598 index 7838e5c..ff92cbc 100644
47599 --- a/fs/proc/internal.h
47600 +++ b/fs/proc/internal.h
47601 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47602 struct pid *pid, struct task_struct *task);
47603 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47604 struct pid *pid, struct task_struct *task);
47605 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47606 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47607 +#endif
47608 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47609
47610 extern const struct file_operations proc_maps_operations;
47611 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47612 index d245cb2..f4e8498 100644
47613 --- a/fs/proc/kcore.c
47614 +++ b/fs/proc/kcore.c
47615 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47616 * the addresses in the elf_phdr on our list.
47617 */
47618 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47619 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47620 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47621 + if (tsz > buflen)
47622 tsz = buflen;
47623 -
47624 +
47625 while (buflen) {
47626 struct kcore_list *m;
47627
47628 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47629 kfree(elf_buf);
47630 } else {
47631 if (kern_addr_valid(start)) {
47632 - unsigned long n;
47633 + char *elf_buf;
47634 + mm_segment_t oldfs;
47635
47636 - n = copy_to_user(buffer, (char *)start, tsz);
47637 - /*
47638 - * We cannot distingush between fault on source
47639 - * and fault on destination. When this happens
47640 - * we clear too and hope it will trigger the
47641 - * EFAULT again.
47642 - */
47643 - if (n) {
47644 - if (clear_user(buffer + tsz - n,
47645 - n))
47646 + elf_buf = kmalloc(tsz, GFP_KERNEL);
47647 + if (!elf_buf)
47648 + return -ENOMEM;
47649 + oldfs = get_fs();
47650 + set_fs(KERNEL_DS);
47651 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47652 + set_fs(oldfs);
47653 + if (copy_to_user(buffer, elf_buf, tsz)) {
47654 + kfree(elf_buf);
47655 return -EFAULT;
47656 + }
47657 }
47658 + set_fs(oldfs);
47659 + kfree(elf_buf);
47660 } else {
47661 if (clear_user(buffer, tsz))
47662 return -EFAULT;
47663 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47664
47665 static int open_kcore(struct inode *inode, struct file *filp)
47666 {
47667 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47668 + return -EPERM;
47669 +#endif
47670 if (!capable(CAP_SYS_RAWIO))
47671 return -EPERM;
47672 if (kcore_need_update)
47673 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47674 index 80e4645..53e5fcf 100644
47675 --- a/fs/proc/meminfo.c
47676 +++ b/fs/proc/meminfo.c
47677 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47678 vmi.used >> 10,
47679 vmi.largest_chunk >> 10
47680 #ifdef CONFIG_MEMORY_FAILURE
47681 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47682 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47683 #endif
47684 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47685 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47686 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47687 index b1822dd..df622cb 100644
47688 --- a/fs/proc/nommu.c
47689 +++ b/fs/proc/nommu.c
47690 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47691 if (len < 1)
47692 len = 1;
47693 seq_printf(m, "%*c", len, ' ');
47694 - seq_path(m, &file->f_path, "");
47695 + seq_path(m, &file->f_path, "\n\\");
47696 }
47697
47698 seq_putc(m, '\n');
47699 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47700 index f738024..876984a 100644
47701 --- a/fs/proc/proc_net.c
47702 +++ b/fs/proc/proc_net.c
47703 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47704 struct task_struct *task;
47705 struct nsproxy *ns;
47706 struct net *net = NULL;
47707 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47708 + const struct cred *cred = current_cred();
47709 +#endif
47710 +
47711 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47712 + if (cred->fsuid)
47713 + return net;
47714 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47715 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47716 + return net;
47717 +#endif
47718
47719 rcu_read_lock();
47720 task = pid_task(proc_pid(dir), PIDTYPE_PID);
47721 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
47722 index a6b6217..1e0579d 100644
47723 --- a/fs/proc/proc_sysctl.c
47724 +++ b/fs/proc/proc_sysctl.c
47725 @@ -9,11 +9,13 @@
47726 #include <linux/namei.h>
47727 #include "internal.h"
47728
47729 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
47730 +
47731 static const struct dentry_operations proc_sys_dentry_operations;
47732 static const struct file_operations proc_sys_file_operations;
47733 -static const struct inode_operations proc_sys_inode_operations;
47734 +const struct inode_operations proc_sys_inode_operations;
47735 static const struct file_operations proc_sys_dir_file_operations;
47736 -static const struct inode_operations proc_sys_dir_operations;
47737 +const struct inode_operations proc_sys_dir_operations;
47738
47739 void proc_sys_poll_notify(struct ctl_table_poll *poll)
47740 {
47741 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
47742
47743 err = NULL;
47744 d_set_d_op(dentry, &proc_sys_dentry_operations);
47745 +
47746 + gr_handle_proc_create(dentry, inode);
47747 +
47748 d_add(dentry, inode);
47749
47750 + if (gr_handle_sysctl(p, MAY_EXEC))
47751 + err = ERR_PTR(-ENOENT);
47752 +
47753 out:
47754 sysctl_head_finish(head);
47755 return err;
47756 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47757 if (!table->proc_handler)
47758 goto out;
47759
47760 +#ifdef CONFIG_GRKERNSEC
47761 + error = -EPERM;
47762 + if (write && !capable(CAP_SYS_ADMIN))
47763 + goto out;
47764 +#endif
47765 +
47766 /* careful: calling conventions are nasty here */
47767 res = count;
47768 error = table->proc_handler(table, write, buf, &res, ppos);
47769 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
47770 return -ENOMEM;
47771 } else {
47772 d_set_d_op(child, &proc_sys_dentry_operations);
47773 +
47774 + gr_handle_proc_create(child, inode);
47775 +
47776 d_add(child, inode);
47777 }
47778 } else {
47779 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
47780 if (*pos < file->f_pos)
47781 continue;
47782
47783 + if (gr_handle_sysctl(table, 0))
47784 + continue;
47785 +
47786 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
47787 if (res)
47788 return res;
47789 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
47790 if (IS_ERR(head))
47791 return PTR_ERR(head);
47792
47793 + if (table && gr_handle_sysctl(table, MAY_EXEC))
47794 + return -ENOENT;
47795 +
47796 generic_fillattr(inode, stat);
47797 if (table)
47798 stat->mode = (stat->mode & S_IFMT) | table->mode;
47799 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
47800 .llseek = generic_file_llseek,
47801 };
47802
47803 -static const struct inode_operations proc_sys_inode_operations = {
47804 +const struct inode_operations proc_sys_inode_operations = {
47805 .permission = proc_sys_permission,
47806 .setattr = proc_sys_setattr,
47807 .getattr = proc_sys_getattr,
47808 };
47809
47810 -static const struct inode_operations proc_sys_dir_operations = {
47811 +const struct inode_operations proc_sys_dir_operations = {
47812 .lookup = proc_sys_lookup,
47813 .permission = proc_sys_permission,
47814 .setattr = proc_sys_setattr,
47815 diff --git a/fs/proc/root.c b/fs/proc/root.c
47816 index 03102d9..4ae347e 100644
47817 --- a/fs/proc/root.c
47818 +++ b/fs/proc/root.c
47819 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
47820 #ifdef CONFIG_PROC_DEVICETREE
47821 proc_device_tree_init();
47822 #endif
47823 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47824 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47825 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
47826 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47827 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
47828 +#endif
47829 +#else
47830 proc_mkdir("bus", NULL);
47831 +#endif
47832 proc_sys_init();
47833 }
47834
47835 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
47836 index 7dcd2a2..b2f410e 100644
47837 --- a/fs/proc/task_mmu.c
47838 +++ b/fs/proc/task_mmu.c
47839 @@ -11,6 +11,7 @@
47840 #include <linux/rmap.h>
47841 #include <linux/swap.h>
47842 #include <linux/swapops.h>
47843 +#include <linux/grsecurity.h>
47844
47845 #include <asm/elf.h>
47846 #include <asm/uaccess.h>
47847 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47848 "VmExe:\t%8lu kB\n"
47849 "VmLib:\t%8lu kB\n"
47850 "VmPTE:\t%8lu kB\n"
47851 - "VmSwap:\t%8lu kB\n",
47852 - hiwater_vm << (PAGE_SHIFT-10),
47853 + "VmSwap:\t%8lu kB\n"
47854 +
47855 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47856 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
47857 +#endif
47858 +
47859 + ,hiwater_vm << (PAGE_SHIFT-10),
47860 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
47861 mm->locked_vm << (PAGE_SHIFT-10),
47862 mm->pinned_vm << (PAGE_SHIFT-10),
47863 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47864 data << (PAGE_SHIFT-10),
47865 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
47866 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
47867 - swap << (PAGE_SHIFT-10));
47868 + swap << (PAGE_SHIFT-10)
47869 +
47870 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47871 + , mm->context.user_cs_base, mm->context.user_cs_limit
47872 +#endif
47873 +
47874 + );
47875 }
47876
47877 unsigned long task_vsize(struct mm_struct *mm)
47878 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
47879 return ret;
47880 }
47881
47882 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47883 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47884 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47885 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47886 +#endif
47887 +
47888 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47889 {
47890 struct mm_struct *mm = vma->vm_mm;
47891 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47892 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
47893 }
47894
47895 - /* We don't show the stack guard page in /proc/maps */
47896 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47897 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
47898 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
47899 +#else
47900 start = vma->vm_start;
47901 - if (stack_guard_page_start(vma, start))
47902 - start += PAGE_SIZE;
47903 end = vma->vm_end;
47904 - if (stack_guard_page_end(vma, end))
47905 - end -= PAGE_SIZE;
47906 +#endif
47907
47908 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
47909 start,
47910 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47911 flags & VM_WRITE ? 'w' : '-',
47912 flags & VM_EXEC ? 'x' : '-',
47913 flags & VM_MAYSHARE ? 's' : 'p',
47914 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47915 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
47916 +#else
47917 pgoff,
47918 +#endif
47919 MAJOR(dev), MINOR(dev), ino, &len);
47920
47921 /*
47922 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47923 */
47924 if (file) {
47925 pad_len_spaces(m, len);
47926 - seq_path(m, &file->f_path, "\n");
47927 + seq_path(m, &file->f_path, "\n\\");
47928 } else {
47929 const char *name = arch_vma_name(vma);
47930 if (!name) {
47931 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47932 if (vma->vm_start <= mm->brk &&
47933 vma->vm_end >= mm->start_brk) {
47934 name = "[heap]";
47935 - } else if (vma->vm_start <= mm->start_stack &&
47936 - vma->vm_end >= mm->start_stack) {
47937 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
47938 + (vma->vm_start <= mm->start_stack &&
47939 + vma->vm_end >= mm->start_stack)) {
47940 name = "[stack]";
47941 }
47942 } else {
47943 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
47944 struct proc_maps_private *priv = m->private;
47945 struct task_struct *task = priv->task;
47946
47947 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47948 + if (current->exec_id != m->exec_id) {
47949 + gr_log_badprocpid("maps");
47950 + return 0;
47951 + }
47952 +#endif
47953 +
47954 show_map_vma(m, vma);
47955
47956 if (m->count < m->size) /* vma is copied successfully */
47957 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
47958 .private = &mss,
47959 };
47960
47961 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47962 + if (current->exec_id != m->exec_id) {
47963 + gr_log_badprocpid("smaps");
47964 + return 0;
47965 + }
47966 +#endif
47967 memset(&mss, 0, sizeof mss);
47968 - mss.vma = vma;
47969 - /* mmap_sem is held in m_start */
47970 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47971 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47972 -
47973 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47974 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
47975 +#endif
47976 + mss.vma = vma;
47977 + /* mmap_sem is held in m_start */
47978 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
47979 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
47980 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47981 + }
47982 +#endif
47983 show_map_vma(m, vma);
47984
47985 seq_printf(m,
47986 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
47987 "KernelPageSize: %8lu kB\n"
47988 "MMUPageSize: %8lu kB\n"
47989 "Locked: %8lu kB\n",
47990 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47991 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
47992 +#else
47993 (vma->vm_end - vma->vm_start) >> 10,
47994 +#endif
47995 mss.resident >> 10,
47996 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
47997 mss.shared_clean >> 10,
47998 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
47999 int n;
48000 char buffer[50];
48001
48002 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48003 + if (current->exec_id != m->exec_id) {
48004 + gr_log_badprocpid("numa_maps");
48005 + return 0;
48006 + }
48007 +#endif
48008 +
48009 if (!mm)
48010 return 0;
48011
48012 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
48013 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48014 mpol_cond_put(pol);
48015
48016 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48017 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48018 +#else
48019 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48020 +#endif
48021
48022 if (file) {
48023 seq_printf(m, " file=");
48024 - seq_path(m, &file->f_path, "\n\t= ");
48025 + seq_path(m, &file->f_path, "\n\t\\= ");
48026 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48027 seq_printf(m, " heap");
48028 } else if (vma->vm_start <= mm->start_stack &&
48029 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48030 index 980de54..2a4db5f 100644
48031 --- a/fs/proc/task_nommu.c
48032 +++ b/fs/proc/task_nommu.c
48033 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48034 else
48035 bytes += kobjsize(mm);
48036
48037 - if (current->fs && current->fs->users > 1)
48038 + if (current->fs && atomic_read(&current->fs->users) > 1)
48039 sbytes += kobjsize(current->fs);
48040 else
48041 bytes += kobjsize(current->fs);
48042 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
48043
48044 if (file) {
48045 pad_len_spaces(m, len);
48046 - seq_path(m, &file->f_path, "");
48047 + seq_path(m, &file->f_path, "\n\\");
48048 } else if (mm) {
48049 if (vma->vm_start <= mm->start_stack &&
48050 vma->vm_end >= mm->start_stack) {
48051 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48052 index d67908b..d13f6a6 100644
48053 --- a/fs/quota/netlink.c
48054 +++ b/fs/quota/netlink.c
48055 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48056 void quota_send_warning(short type, unsigned int id, dev_t dev,
48057 const char warntype)
48058 {
48059 - static atomic_t seq;
48060 + static atomic_unchecked_t seq;
48061 struct sk_buff *skb;
48062 void *msg_head;
48063 int ret;
48064 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48065 "VFS: Not enough memory to send quota warning.\n");
48066 return;
48067 }
48068 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48069 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48070 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48071 if (!msg_head) {
48072 printk(KERN_ERR
48073 diff --git a/fs/readdir.c b/fs/readdir.c
48074 index 356f715..c918d38 100644
48075 --- a/fs/readdir.c
48076 +++ b/fs/readdir.c
48077 @@ -17,6 +17,7 @@
48078 #include <linux/security.h>
48079 #include <linux/syscalls.h>
48080 #include <linux/unistd.h>
48081 +#include <linux/namei.h>
48082
48083 #include <asm/uaccess.h>
48084
48085 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48086
48087 struct readdir_callback {
48088 struct old_linux_dirent __user * dirent;
48089 + struct file * file;
48090 int result;
48091 };
48092
48093 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48094 buf->result = -EOVERFLOW;
48095 return -EOVERFLOW;
48096 }
48097 +
48098 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48099 + return 0;
48100 +
48101 buf->result++;
48102 dirent = buf->dirent;
48103 if (!access_ok(VERIFY_WRITE, dirent,
48104 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48105
48106 buf.result = 0;
48107 buf.dirent = dirent;
48108 + buf.file = file;
48109
48110 error = vfs_readdir(file, fillonedir, &buf);
48111 if (buf.result)
48112 @@ -142,6 +149,7 @@ struct linux_dirent {
48113 struct getdents_callback {
48114 struct linux_dirent __user * current_dir;
48115 struct linux_dirent __user * previous;
48116 + struct file * file;
48117 int count;
48118 int error;
48119 };
48120 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48121 buf->error = -EOVERFLOW;
48122 return -EOVERFLOW;
48123 }
48124 +
48125 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48126 + return 0;
48127 +
48128 dirent = buf->previous;
48129 if (dirent) {
48130 if (__put_user(offset, &dirent->d_off))
48131 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48132 buf.previous = NULL;
48133 buf.count = count;
48134 buf.error = 0;
48135 + buf.file = file;
48136
48137 error = vfs_readdir(file, filldir, &buf);
48138 if (error >= 0)
48139 @@ -229,6 +242,7 @@ out:
48140 struct getdents_callback64 {
48141 struct linux_dirent64 __user * current_dir;
48142 struct linux_dirent64 __user * previous;
48143 + struct file *file;
48144 int count;
48145 int error;
48146 };
48147 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48148 buf->error = -EINVAL; /* only used if we fail.. */
48149 if (reclen > buf->count)
48150 return -EINVAL;
48151 +
48152 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48153 + return 0;
48154 +
48155 dirent = buf->previous;
48156 if (dirent) {
48157 if (__put_user(offset, &dirent->d_off))
48158 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48159
48160 buf.current_dir = dirent;
48161 buf.previous = NULL;
48162 + buf.file = file;
48163 buf.count = count;
48164 buf.error = 0;
48165
48166 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48167 error = buf.error;
48168 lastdirent = buf.previous;
48169 if (lastdirent) {
48170 - typeof(lastdirent->d_off) d_off = file->f_pos;
48171 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48172 if (__put_user(d_off, &lastdirent->d_off))
48173 error = -EFAULT;
48174 else
48175 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48176 index 60c0804..d814f98 100644
48177 --- a/fs/reiserfs/do_balan.c
48178 +++ b/fs/reiserfs/do_balan.c
48179 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48180 return;
48181 }
48182
48183 - atomic_inc(&(fs_generation(tb->tb_sb)));
48184 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48185 do_balance_starts(tb);
48186
48187 /* balance leaf returns 0 except if combining L R and S into
48188 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48189 index 7a99811..a7c96c4 100644
48190 --- a/fs/reiserfs/procfs.c
48191 +++ b/fs/reiserfs/procfs.c
48192 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48193 "SMALL_TAILS " : "NO_TAILS ",
48194 replay_only(sb) ? "REPLAY_ONLY " : "",
48195 convert_reiserfs(sb) ? "CONV " : "",
48196 - atomic_read(&r->s_generation_counter),
48197 + atomic_read_unchecked(&r->s_generation_counter),
48198 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48199 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48200 SF(s_good_search_by_key_reada), SF(s_bmaps),
48201 diff --git a/fs/select.c b/fs/select.c
48202 index d33418f..2a5345e 100644
48203 --- a/fs/select.c
48204 +++ b/fs/select.c
48205 @@ -20,6 +20,7 @@
48206 #include <linux/module.h>
48207 #include <linux/slab.h>
48208 #include <linux/poll.h>
48209 +#include <linux/security.h>
48210 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48211 #include <linux/file.h>
48212 #include <linux/fdtable.h>
48213 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48214 struct poll_list *walk = head;
48215 unsigned long todo = nfds;
48216
48217 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48218 if (nfds > rlimit(RLIMIT_NOFILE))
48219 return -EINVAL;
48220
48221 diff --git a/fs/seq_file.c b/fs/seq_file.c
48222 index dba43c3..9fb8511 100644
48223 --- a/fs/seq_file.c
48224 +++ b/fs/seq_file.c
48225 @@ -9,6 +9,7 @@
48226 #include <linux/module.h>
48227 #include <linux/seq_file.h>
48228 #include <linux/slab.h>
48229 +#include <linux/sched.h>
48230
48231 #include <asm/uaccess.h>
48232 #include <asm/page.h>
48233 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48234 memset(p, 0, sizeof(*p));
48235 mutex_init(&p->lock);
48236 p->op = op;
48237 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48238 + p->exec_id = current->exec_id;
48239 +#endif
48240
48241 /*
48242 * Wrappers around seq_open(e.g. swaps_open) need to be
48243 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
48244 return 0;
48245 }
48246 if (!m->buf) {
48247 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48248 + m->size = PAGE_SIZE;
48249 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48250 if (!m->buf)
48251 return -ENOMEM;
48252 }
48253 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
48254 Eoverflow:
48255 m->op->stop(m, p);
48256 kfree(m->buf);
48257 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48258 + m->size <<= 1;
48259 + m->buf = kmalloc(m->size, GFP_KERNEL);
48260 return !m->buf ? -ENOMEM : -EAGAIN;
48261 }
48262
48263 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48264 m->version = file->f_version;
48265 /* grab buffer if we didn't have one */
48266 if (!m->buf) {
48267 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48268 + m->size = PAGE_SIZE;
48269 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48270 if (!m->buf)
48271 goto Enomem;
48272 }
48273 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48274 goto Fill;
48275 m->op->stop(m, p);
48276 kfree(m->buf);
48277 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48278 + m->size <<= 1;
48279 + m->buf = kmalloc(m->size, GFP_KERNEL);
48280 if (!m->buf)
48281 goto Enomem;
48282 m->count = 0;
48283 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
48284 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48285 void *data)
48286 {
48287 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48288 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48289 int res = -ENOMEM;
48290
48291 if (op) {
48292 diff --git a/fs/splice.c b/fs/splice.c
48293 index fa2defa..8601650 100644
48294 --- a/fs/splice.c
48295 +++ b/fs/splice.c
48296 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48297 pipe_lock(pipe);
48298
48299 for (;;) {
48300 - if (!pipe->readers) {
48301 + if (!atomic_read(&pipe->readers)) {
48302 send_sig(SIGPIPE, current, 0);
48303 if (!ret)
48304 ret = -EPIPE;
48305 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48306 do_wakeup = 0;
48307 }
48308
48309 - pipe->waiting_writers++;
48310 + atomic_inc(&pipe->waiting_writers);
48311 pipe_wait(pipe);
48312 - pipe->waiting_writers--;
48313 + atomic_dec(&pipe->waiting_writers);
48314 }
48315
48316 pipe_unlock(pipe);
48317 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48318 old_fs = get_fs();
48319 set_fs(get_ds());
48320 /* The cast to a user pointer is valid due to the set_fs() */
48321 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48322 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48323 set_fs(old_fs);
48324
48325 return res;
48326 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48327 old_fs = get_fs();
48328 set_fs(get_ds());
48329 /* The cast to a user pointer is valid due to the set_fs() */
48330 - res = vfs_write(file, (const char __user *)buf, count, &pos);
48331 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48332 set_fs(old_fs);
48333
48334 return res;
48335 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48336 goto err;
48337
48338 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48339 - vec[i].iov_base = (void __user *) page_address(page);
48340 + vec[i].iov_base = (void __force_user *) page_address(page);
48341 vec[i].iov_len = this_len;
48342 spd.pages[i] = page;
48343 spd.nr_pages++;
48344 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48345 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48346 {
48347 while (!pipe->nrbufs) {
48348 - if (!pipe->writers)
48349 + if (!atomic_read(&pipe->writers))
48350 return 0;
48351
48352 - if (!pipe->waiting_writers && sd->num_spliced)
48353 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48354 return 0;
48355
48356 if (sd->flags & SPLICE_F_NONBLOCK)
48357 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48358 * out of the pipe right after the splice_to_pipe(). So set
48359 * PIPE_READERS appropriately.
48360 */
48361 - pipe->readers = 1;
48362 + atomic_set(&pipe->readers, 1);
48363
48364 current->splice_pipe = pipe;
48365 }
48366 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48367 ret = -ERESTARTSYS;
48368 break;
48369 }
48370 - if (!pipe->writers)
48371 + if (!atomic_read(&pipe->writers))
48372 break;
48373 - if (!pipe->waiting_writers) {
48374 + if (!atomic_read(&pipe->waiting_writers)) {
48375 if (flags & SPLICE_F_NONBLOCK) {
48376 ret = -EAGAIN;
48377 break;
48378 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48379 pipe_lock(pipe);
48380
48381 while (pipe->nrbufs >= pipe->buffers) {
48382 - if (!pipe->readers) {
48383 + if (!atomic_read(&pipe->readers)) {
48384 send_sig(SIGPIPE, current, 0);
48385 ret = -EPIPE;
48386 break;
48387 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48388 ret = -ERESTARTSYS;
48389 break;
48390 }
48391 - pipe->waiting_writers++;
48392 + atomic_inc(&pipe->waiting_writers);
48393 pipe_wait(pipe);
48394 - pipe->waiting_writers--;
48395 + atomic_dec(&pipe->waiting_writers);
48396 }
48397
48398 pipe_unlock(pipe);
48399 @@ -1819,14 +1819,14 @@ retry:
48400 pipe_double_lock(ipipe, opipe);
48401
48402 do {
48403 - if (!opipe->readers) {
48404 + if (!atomic_read(&opipe->readers)) {
48405 send_sig(SIGPIPE, current, 0);
48406 if (!ret)
48407 ret = -EPIPE;
48408 break;
48409 }
48410
48411 - if (!ipipe->nrbufs && !ipipe->writers)
48412 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48413 break;
48414
48415 /*
48416 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48417 pipe_double_lock(ipipe, opipe);
48418
48419 do {
48420 - if (!opipe->readers) {
48421 + if (!atomic_read(&opipe->readers)) {
48422 send_sig(SIGPIPE, current, 0);
48423 if (!ret)
48424 ret = -EPIPE;
48425 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48426 * return EAGAIN if we have the potential of some data in the
48427 * future, otherwise just return 0
48428 */
48429 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48430 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48431 ret = -EAGAIN;
48432
48433 pipe_unlock(ipipe);
48434 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48435 index 7fdf6a7..e6cd8ad 100644
48436 --- a/fs/sysfs/dir.c
48437 +++ b/fs/sysfs/dir.c
48438 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48439 struct sysfs_dirent *sd;
48440 int rc;
48441
48442 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48443 + const char *parent_name = parent_sd->s_name;
48444 +
48445 + mode = S_IFDIR | S_IRWXU;
48446 +
48447 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48448 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48449 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48450 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48451 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48452 +#endif
48453 +
48454 /* allocate */
48455 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48456 if (!sd)
48457 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48458 index 779789a..f58193c 100644
48459 --- a/fs/sysfs/file.c
48460 +++ b/fs/sysfs/file.c
48461 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48462
48463 struct sysfs_open_dirent {
48464 atomic_t refcnt;
48465 - atomic_t event;
48466 + atomic_unchecked_t event;
48467 wait_queue_head_t poll;
48468 struct list_head buffers; /* goes through sysfs_buffer.list */
48469 };
48470 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48471 if (!sysfs_get_active(attr_sd))
48472 return -ENODEV;
48473
48474 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48475 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48476 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48477
48478 sysfs_put_active(attr_sd);
48479 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48480 return -ENOMEM;
48481
48482 atomic_set(&new_od->refcnt, 0);
48483 - atomic_set(&new_od->event, 1);
48484 + atomic_set_unchecked(&new_od->event, 1);
48485 init_waitqueue_head(&new_od->poll);
48486 INIT_LIST_HEAD(&new_od->buffers);
48487 goto retry;
48488 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48489
48490 sysfs_put_active(attr_sd);
48491
48492 - if (buffer->event != atomic_read(&od->event))
48493 + if (buffer->event != atomic_read_unchecked(&od->event))
48494 goto trigger;
48495
48496 return DEFAULT_POLLMASK;
48497 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48498
48499 od = sd->s_attr.open;
48500 if (od) {
48501 - atomic_inc(&od->event);
48502 + atomic_inc_unchecked(&od->event);
48503 wake_up_interruptible(&od->poll);
48504 }
48505
48506 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48507 index a7ac78f..02158e1 100644
48508 --- a/fs/sysfs/symlink.c
48509 +++ b/fs/sysfs/symlink.c
48510 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48511
48512 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48513 {
48514 - char *page = nd_get_link(nd);
48515 + const char *page = nd_get_link(nd);
48516 if (!IS_ERR(page))
48517 free_page((unsigned long)page);
48518 }
48519 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48520 index c175b4d..8f36a16 100644
48521 --- a/fs/udf/misc.c
48522 +++ b/fs/udf/misc.c
48523 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48524
48525 u8 udf_tag_checksum(const struct tag *t)
48526 {
48527 - u8 *data = (u8 *)t;
48528 + const u8 *data = (const u8 *)t;
48529 u8 checksum = 0;
48530 int i;
48531 for (i = 0; i < sizeof(struct tag); ++i)
48532 diff --git a/fs/utimes.c b/fs/utimes.c
48533 index ba653f3..06ea4b1 100644
48534 --- a/fs/utimes.c
48535 +++ b/fs/utimes.c
48536 @@ -1,6 +1,7 @@
48537 #include <linux/compiler.h>
48538 #include <linux/file.h>
48539 #include <linux/fs.h>
48540 +#include <linux/security.h>
48541 #include <linux/linkage.h>
48542 #include <linux/mount.h>
48543 #include <linux/namei.h>
48544 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48545 goto mnt_drop_write_and_out;
48546 }
48547 }
48548 +
48549 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48550 + error = -EACCES;
48551 + goto mnt_drop_write_and_out;
48552 + }
48553 +
48554 mutex_lock(&inode->i_mutex);
48555 error = notify_change(path->dentry, &newattrs);
48556 mutex_unlock(&inode->i_mutex);
48557 diff --git a/fs/xattr.c b/fs/xattr.c
48558 index 67583de..c5aad14 100644
48559 --- a/fs/xattr.c
48560 +++ b/fs/xattr.c
48561 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48562 * Extended attribute SET operations
48563 */
48564 static long
48565 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
48566 +setxattr(struct path *path, const char __user *name, const void __user *value,
48567 size_t size, int flags)
48568 {
48569 int error;
48570 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48571 return PTR_ERR(kvalue);
48572 }
48573
48574 - error = vfs_setxattr(d, kname, kvalue, size, flags);
48575 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48576 + error = -EACCES;
48577 + goto out;
48578 + }
48579 +
48580 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48581 +out:
48582 kfree(kvalue);
48583 return error;
48584 }
48585 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48586 return error;
48587 error = mnt_want_write(path.mnt);
48588 if (!error) {
48589 - error = setxattr(path.dentry, name, value, size, flags);
48590 + error = setxattr(&path, name, value, size, flags);
48591 mnt_drop_write(path.mnt);
48592 }
48593 path_put(&path);
48594 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48595 return error;
48596 error = mnt_want_write(path.mnt);
48597 if (!error) {
48598 - error = setxattr(path.dentry, name, value, size, flags);
48599 + error = setxattr(&path, name, value, size, flags);
48600 mnt_drop_write(path.mnt);
48601 }
48602 path_put(&path);
48603 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48604 const void __user *,value, size_t, size, int, flags)
48605 {
48606 struct file *f;
48607 - struct dentry *dentry;
48608 int error = -EBADF;
48609
48610 f = fget(fd);
48611 if (!f)
48612 return error;
48613 - dentry = f->f_path.dentry;
48614 - audit_inode(NULL, dentry);
48615 + audit_inode(NULL, f->f_path.dentry);
48616 error = mnt_want_write_file(f);
48617 if (!error) {
48618 - error = setxattr(dentry, name, value, size, flags);
48619 + error = setxattr(&f->f_path, name, value, size, flags);
48620 mnt_drop_write(f->f_path.mnt);
48621 }
48622 fput(f);
48623 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48624 index 8d5a506..7f62712 100644
48625 --- a/fs/xattr_acl.c
48626 +++ b/fs/xattr_acl.c
48627 @@ -17,8 +17,8 @@
48628 struct posix_acl *
48629 posix_acl_from_xattr(const void *value, size_t size)
48630 {
48631 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48632 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48633 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48634 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48635 int count;
48636 struct posix_acl *acl;
48637 struct posix_acl_entry *acl_e;
48638 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48639 index d0ab788..827999b 100644
48640 --- a/fs/xfs/xfs_bmap.c
48641 +++ b/fs/xfs/xfs_bmap.c
48642 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48643 int nmap,
48644 int ret_nmap);
48645 #else
48646 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48647 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48648 #endif /* DEBUG */
48649
48650 STATIC int
48651 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48652 index 79d05e8..e3e5861 100644
48653 --- a/fs/xfs/xfs_dir2_sf.c
48654 +++ b/fs/xfs/xfs_dir2_sf.c
48655 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48656 }
48657
48658 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48659 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48660 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48661 + char name[sfep->namelen];
48662 + memcpy(name, sfep->name, sfep->namelen);
48663 + if (filldir(dirent, name, sfep->namelen,
48664 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
48665 + *offset = off & 0x7fffffff;
48666 + return 0;
48667 + }
48668 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48669 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48670 *offset = off & 0x7fffffff;
48671 return 0;
48672 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48673 index d99a905..9f88202 100644
48674 --- a/fs/xfs/xfs_ioctl.c
48675 +++ b/fs/xfs/xfs_ioctl.c
48676 @@ -128,7 +128,7 @@ xfs_find_handle(
48677 }
48678
48679 error = -EFAULT;
48680 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48681 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48682 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48683 goto out_put;
48684
48685 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48686 index 23ce927..e274cc1 100644
48687 --- a/fs/xfs/xfs_iops.c
48688 +++ b/fs/xfs/xfs_iops.c
48689 @@ -447,7 +447,7 @@ xfs_vn_put_link(
48690 struct nameidata *nd,
48691 void *p)
48692 {
48693 - char *s = nd_get_link(nd);
48694 + const char *s = nd_get_link(nd);
48695
48696 if (!IS_ERR(s))
48697 kfree(s);
48698 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
48699 new file mode 100644
48700 index 0000000..4089e05
48701 --- /dev/null
48702 +++ b/grsecurity/Kconfig
48703 @@ -0,0 +1,1078 @@
48704 +#
48705 +# grecurity configuration
48706 +#
48707 +
48708 +menu "Grsecurity"
48709 +
48710 +config GRKERNSEC
48711 + bool "Grsecurity"
48712 + select CRYPTO
48713 + select CRYPTO_SHA256
48714 + help
48715 + If you say Y here, you will be able to configure many features
48716 + that will enhance the security of your system. It is highly
48717 + recommended that you say Y here and read through the help
48718 + for each option so that you fully understand the features and
48719 + can evaluate their usefulness for your machine.
48720 +
48721 +choice
48722 + prompt "Security Level"
48723 + depends on GRKERNSEC
48724 + default GRKERNSEC_CUSTOM
48725 +
48726 +config GRKERNSEC_LOW
48727 + bool "Low"
48728 + select GRKERNSEC_LINK
48729 + select GRKERNSEC_FIFO
48730 + select GRKERNSEC_RANDNET
48731 + select GRKERNSEC_DMESG
48732 + select GRKERNSEC_CHROOT
48733 + select GRKERNSEC_CHROOT_CHDIR
48734 +
48735 + help
48736 + If you choose this option, several of the grsecurity options will
48737 + be enabled that will give you greater protection against a number
48738 + of attacks, while assuring that none of your software will have any
48739 + conflicts with the additional security measures. If you run a lot
48740 + of unusual software, or you are having problems with the higher
48741 + security levels, you should say Y here. With this option, the
48742 + following features are enabled:
48743 +
48744 + - Linking restrictions
48745 + - FIFO restrictions
48746 + - Restricted dmesg
48747 + - Enforced chdir("/") on chroot
48748 + - Runtime module disabling
48749 +
48750 +config GRKERNSEC_MEDIUM
48751 + bool "Medium"
48752 + select PAX
48753 + select PAX_EI_PAX
48754 + select PAX_PT_PAX_FLAGS
48755 + select PAX_HAVE_ACL_FLAGS
48756 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48757 + select GRKERNSEC_CHROOT
48758 + select GRKERNSEC_CHROOT_SYSCTL
48759 + select GRKERNSEC_LINK
48760 + select GRKERNSEC_FIFO
48761 + select GRKERNSEC_DMESG
48762 + select GRKERNSEC_RANDNET
48763 + select GRKERNSEC_FORKFAIL
48764 + select GRKERNSEC_TIME
48765 + select GRKERNSEC_SIGNAL
48766 + select GRKERNSEC_CHROOT
48767 + select GRKERNSEC_CHROOT_UNIX
48768 + select GRKERNSEC_CHROOT_MOUNT
48769 + select GRKERNSEC_CHROOT_PIVOT
48770 + select GRKERNSEC_CHROOT_DOUBLE
48771 + select GRKERNSEC_CHROOT_CHDIR
48772 + select GRKERNSEC_CHROOT_MKNOD
48773 + select GRKERNSEC_PROC
48774 + select GRKERNSEC_PROC_USERGROUP
48775 + select PAX_RANDUSTACK
48776 + select PAX_ASLR
48777 + select PAX_RANDMMAP
48778 + select PAX_REFCOUNT if (X86 || SPARC64)
48779 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
48780 +
48781 + help
48782 + If you say Y here, several features in addition to those included
48783 + in the low additional security level will be enabled. These
48784 + features provide even more security to your system, though in rare
48785 + cases they may be incompatible with very old or poorly written
48786 + software. If you enable this option, make sure that your auth
48787 + service (identd) is running as gid 1001. With this option,
48788 + the following features (in addition to those provided in the
48789 + low additional security level) will be enabled:
48790 +
48791 + - Failed fork logging
48792 + - Time change logging
48793 + - Signal logging
48794 + - Deny mounts in chroot
48795 + - Deny double chrooting
48796 + - Deny sysctl writes in chroot
48797 + - Deny mknod in chroot
48798 + - Deny access to abstract AF_UNIX sockets out of chroot
48799 + - Deny pivot_root in chroot
48800 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
48801 + - /proc restrictions with special GID set to 10 (usually wheel)
48802 + - Address Space Layout Randomization (ASLR)
48803 + - Prevent exploitation of most refcount overflows
48804 + - Bounds checking of copying between the kernel and userland
48805 +
48806 +config GRKERNSEC_HIGH
48807 + bool "High"
48808 + select GRKERNSEC_LINK
48809 + select GRKERNSEC_FIFO
48810 + select GRKERNSEC_DMESG
48811 + select GRKERNSEC_FORKFAIL
48812 + select GRKERNSEC_TIME
48813 + select GRKERNSEC_SIGNAL
48814 + select GRKERNSEC_CHROOT
48815 + select GRKERNSEC_CHROOT_SHMAT
48816 + select GRKERNSEC_CHROOT_UNIX
48817 + select GRKERNSEC_CHROOT_MOUNT
48818 + select GRKERNSEC_CHROOT_FCHDIR
48819 + select GRKERNSEC_CHROOT_PIVOT
48820 + select GRKERNSEC_CHROOT_DOUBLE
48821 + select GRKERNSEC_CHROOT_CHDIR
48822 + select GRKERNSEC_CHROOT_MKNOD
48823 + select GRKERNSEC_CHROOT_CAPS
48824 + select GRKERNSEC_CHROOT_SYSCTL
48825 + select GRKERNSEC_CHROOT_FINDTASK
48826 + select GRKERNSEC_SYSFS_RESTRICT
48827 + select GRKERNSEC_PROC
48828 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48829 + select GRKERNSEC_HIDESYM
48830 + select GRKERNSEC_BRUTE
48831 + select GRKERNSEC_PROC_USERGROUP
48832 + select GRKERNSEC_KMEM
48833 + select GRKERNSEC_RESLOG
48834 + select GRKERNSEC_RANDNET
48835 + select GRKERNSEC_PROC_ADD
48836 + select GRKERNSEC_CHROOT_CHMOD
48837 + select GRKERNSEC_CHROOT_NICE
48838 + select GRKERNSEC_SETXID
48839 + select GRKERNSEC_AUDIT_MOUNT
48840 + select GRKERNSEC_MODHARDEN if (MODULES)
48841 + select GRKERNSEC_HARDEN_PTRACE
48842 + select GRKERNSEC_PTRACE_READEXEC
48843 + select GRKERNSEC_VM86 if (X86_32)
48844 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
48845 + select PAX
48846 + select PAX_RANDUSTACK
48847 + select PAX_ASLR
48848 + select PAX_RANDMMAP
48849 + select PAX_NOEXEC
48850 + select PAX_MPROTECT
48851 + select PAX_EI_PAX
48852 + select PAX_PT_PAX_FLAGS
48853 + select PAX_HAVE_ACL_FLAGS
48854 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
48855 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
48856 + select PAX_RANDKSTACK if (X86_TSC && X86)
48857 + select PAX_SEGMEXEC if (X86_32)
48858 + select PAX_PAGEEXEC
48859 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
48860 + select PAX_EMUTRAMP if (PARISC)
48861 + select PAX_EMUSIGRT if (PARISC)
48862 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
48863 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
48864 + select PAX_REFCOUNT if (X86 || SPARC64)
48865 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
48866 + help
48867 + If you say Y here, many of the features of grsecurity will be
48868 + enabled, which will protect you against many kinds of attacks
48869 + against your system. The heightened security comes at a cost
48870 + of an increased chance of incompatibilities with rare software
48871 + on your machine. Since this security level enables PaX, you should
48872 + view <http://pax.grsecurity.net> and read about the PaX
48873 + project. While you are there, download chpax and run it on
48874 + binaries that cause problems with PaX. Also remember that
48875 + since the /proc restrictions are enabled, you must run your
48876 + identd as gid 1001. This security level enables the following
48877 + features in addition to those listed in the low and medium
48878 + security levels:
48879 +
48880 + - Additional /proc restrictions
48881 + - Chmod restrictions in chroot
48882 + - No signals, ptrace, or viewing of processes outside of chroot
48883 + - Capability restrictions in chroot
48884 + - Deny fchdir out of chroot
48885 + - Priority restrictions in chroot
48886 + - Segmentation-based implementation of PaX
48887 + - Mprotect restrictions
48888 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
48889 + - Kernel stack randomization
48890 + - Mount/unmount/remount logging
48891 + - Kernel symbol hiding
48892 + - Hardening of module auto-loading
48893 + - Ptrace restrictions
48894 + - Restricted vm86 mode
48895 + - Restricted sysfs/debugfs
48896 + - Active kernel exploit response
48897 +
48898 +config GRKERNSEC_CUSTOM
48899 + bool "Custom"
48900 + help
48901 + If you say Y here, you will be able to configure every grsecurity
48902 + option, which allows you to enable many more features that aren't
48903 + covered in the basic security levels. These additional features
48904 + include TPE, socket restrictions, and the sysctl system for
48905 + grsecurity. It is advised that you read through the help for
48906 + each option to determine its usefulness in your situation.
48907 +
48908 +endchoice
48909 +
48910 +menu "Memory Protections"
48911 +depends on GRKERNSEC
48912 +
48913 +config GRKERNSEC_KMEM
48914 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
48915 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
48916 + help
48917 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
48918 + be written to or read from to modify or leak the contents of the running
48919 + kernel. /dev/port will also not be allowed to be opened. If you have module
48920 + support disabled, enabling this will close up four ways that are
48921 + currently used to insert malicious code into the running kernel.
48922 + Even with all these features enabled, we still highly recommend that
48923 + you use the RBAC system, as it is still possible for an attacker to
48924 + modify the running kernel through privileged I/O granted by ioperm/iopl.
48925 + If you are not using XFree86, you may be able to stop this additional
48926 + case by enabling the 'Disable privileged I/O' option. Though nothing
48927 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
48928 + but only to video memory, which is the only writing we allow in this
48929 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
48930 + not be allowed to mprotect it with PROT_WRITE later.
48931 + It is highly recommended that you say Y here if you meet all the
48932 + conditions above.
48933 +
48934 +config GRKERNSEC_VM86
48935 + bool "Restrict VM86 mode"
48936 + depends on X86_32
48937 +
48938 + help
48939 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
48940 + make use of a special execution mode on 32bit x86 processors called
48941 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
48942 + video cards and will still work with this option enabled. The purpose
48943 + of the option is to prevent exploitation of emulation errors in
48944 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
48945 + Nearly all users should be able to enable this option.
48946 +
48947 +config GRKERNSEC_IO
48948 + bool "Disable privileged I/O"
48949 + depends on X86
48950 + select RTC_CLASS
48951 + select RTC_INTF_DEV
48952 + select RTC_DRV_CMOS
48953 +
48954 + help
48955 + If you say Y here, all ioperm and iopl calls will return an error.
48956 + Ioperm and iopl can be used to modify the running kernel.
48957 + Unfortunately, some programs need this access to operate properly,
48958 + the most notable of which are XFree86 and hwclock. hwclock can be
48959 + remedied by having RTC support in the kernel, so real-time
48960 + clock support is enabled if this option is enabled, to ensure
48961 + that hwclock operates correctly. XFree86 still will not
48962 + operate correctly with this option enabled, so DO NOT CHOOSE Y
48963 + IF YOU USE XFree86. If you use XFree86 and you still want to
48964 + protect your kernel against modification, use the RBAC system.
48965 +
48966 +config GRKERNSEC_PROC_MEMMAP
48967 + bool "Harden ASLR against information leaks and entropy reduction"
48968 + default y if (PAX_NOEXEC || PAX_ASLR)
48969 + depends on PAX_NOEXEC || PAX_ASLR
48970 + help
48971 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
48972 + give no information about the addresses of its mappings if
48973 + PaX features that rely on random addresses are enabled on the task.
48974 + In addition to sanitizing this information and disabling other
48975 + dangerous sources of information, this option causes reads of sensitive
48976 + /proc/<pid> entries where the file descriptor was opened in a different
48977 + task than the one performing the read. Such attempts are logged.
48978 + This option also limits argv/env strings for suid/sgid binaries
48979 + to 512KB to prevent a complete exhaustion of the stack entropy provided
48980 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
48981 + binaries to prevent alternative mmap layouts from being abused.
48982 +
48983 + If you use PaX it is essential that you say Y here as it closes up
48984 + several holes that make full ASLR useless locally.
48985 +
48986 +config GRKERNSEC_BRUTE
48987 + bool "Deter exploit bruteforcing"
48988 + help
48989 + If you say Y here, attempts to bruteforce exploits against forking
48990 + daemons such as apache or sshd, as well as against suid/sgid binaries
48991 + will be deterred. When a child of a forking daemon is killed by PaX
48992 + or crashes due to an illegal instruction or other suspicious signal,
48993 + the parent process will be delayed 30 seconds upon every subsequent
48994 + fork until the administrator is able to assess the situation and
48995 + restart the daemon.
48996 + In the suid/sgid case, the attempt is logged, the user has all their
48997 + processes terminated, and they are prevented from executing any further
48998 + processes for 15 minutes.
48999 + It is recommended that you also enable signal logging in the auditing
49000 + section so that logs are generated when a process triggers a suspicious
49001 + signal.
49002 + If the sysctl option is enabled, a sysctl option with name
49003 + "deter_bruteforce" is created.
49004 +
49005 +
49006 +config GRKERNSEC_MODHARDEN
49007 + bool "Harden module auto-loading"
49008 + depends on MODULES
49009 + help
49010 + If you say Y here, module auto-loading in response to use of some
49011 + feature implemented by an unloaded module will be restricted to
49012 + root users. Enabling this option helps defend against attacks
49013 + by unprivileged users who abuse the auto-loading behavior to
49014 + cause a vulnerable module to load that is then exploited.
49015 +
49016 + If this option prevents a legitimate use of auto-loading for a
49017 + non-root user, the administrator can execute modprobe manually
49018 + with the exact name of the module mentioned in the alert log.
49019 + Alternatively, the administrator can add the module to the list
49020 + of modules loaded at boot by modifying init scripts.
49021 +
49022 + Modification of init scripts will most likely be needed on
49023 + Ubuntu servers with encrypted home directory support enabled,
49024 + as the first non-root user logging in will cause the ecb(aes),
49025 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49026 +
49027 +config GRKERNSEC_HIDESYM
49028 + bool "Hide kernel symbols"
49029 + help
49030 + If you say Y here, getting information on loaded modules, and
49031 + displaying all kernel symbols through a syscall will be restricted
49032 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49033 + /proc/kallsyms will be restricted to the root user. The RBAC
49034 + system can hide that entry even from root.
49035 +
49036 + This option also prevents leaking of kernel addresses through
49037 + several /proc entries.
49038 +
49039 + Note that this option is only effective provided the following
49040 + conditions are met:
49041 + 1) The kernel using grsecurity is not precompiled by some distribution
49042 + 2) You have also enabled GRKERNSEC_DMESG
49043 + 3) You are using the RBAC system and hiding other files such as your
49044 + kernel image and System.map. Alternatively, enabling this option
49045 + causes the permissions on /boot, /lib/modules, and the kernel
49046 + source directory to change at compile time to prevent
49047 + reading by non-root users.
49048 + If the above conditions are met, this option will aid in providing a
49049 + useful protection against local kernel exploitation of overflows
49050 + and arbitrary read/write vulnerabilities.
49051 +
49052 +config GRKERNSEC_KERN_LOCKOUT
49053 + bool "Active kernel exploit response"
49054 + depends on X86 || ARM || PPC || SPARC
49055 + help
49056 + If you say Y here, when a PaX alert is triggered due to suspicious
49057 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49058 + or an OOPs occurs due to bad memory accesses, instead of just
49059 + terminating the offending process (and potentially allowing
49060 + a subsequent exploit from the same user), we will take one of two
49061 + actions:
49062 + If the user was root, we will panic the system
49063 + If the user was non-root, we will log the attempt, terminate
49064 + all processes owned by the user, then prevent them from creating
49065 + any new processes until the system is restarted
49066 + This deters repeated kernel exploitation/bruteforcing attempts
49067 + and is useful for later forensics.
49068 +
49069 +endmenu
49070 +menu "Role Based Access Control Options"
49071 +depends on GRKERNSEC
49072 +
49073 +config GRKERNSEC_RBAC_DEBUG
49074 + bool
49075 +
49076 +config GRKERNSEC_NO_RBAC
49077 + bool "Disable RBAC system"
49078 + help
49079 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49080 + preventing the RBAC system from being enabled. You should only say Y
49081 + here if you have no intention of using the RBAC system, so as to prevent
49082 + an attacker with root access from misusing the RBAC system to hide files
49083 + and processes when loadable module support and /dev/[k]mem have been
49084 + locked down.
49085 +
49086 +config GRKERNSEC_ACL_HIDEKERN
49087 + bool "Hide kernel processes"
49088 + help
49089 + If you say Y here, all kernel threads will be hidden to all
49090 + processes but those whose subject has the "view hidden processes"
49091 + flag.
49092 +
49093 +config GRKERNSEC_ACL_MAXTRIES
49094 + int "Maximum tries before password lockout"
49095 + default 3
49096 + help
49097 + This option enforces the maximum number of times a user can attempt
49098 + to authorize themselves with the grsecurity RBAC system before being
49099 + denied the ability to attempt authorization again for a specified time.
49100 + The lower the number, the harder it will be to brute-force a password.
49101 +
49102 +config GRKERNSEC_ACL_TIMEOUT
49103 + int "Time to wait after max password tries, in seconds"
49104 + default 30
49105 + help
49106 + This option specifies the time the user must wait after attempting to
49107 + authorize to the RBAC system with the maximum number of invalid
49108 + passwords. The higher the number, the harder it will be to brute-force
49109 + a password.
49110 +
49111 +endmenu
49112 +menu "Filesystem Protections"
49113 +depends on GRKERNSEC
49114 +
49115 +config GRKERNSEC_PROC
49116 + bool "Proc restrictions"
49117 + help
49118 + If you say Y here, the permissions of the /proc filesystem
49119 + will be altered to enhance system security and privacy. You MUST
49120 + choose either a user only restriction or a user and group restriction.
49121 + Depending upon the option you choose, you can either restrict users to
49122 + see only the processes they themselves run, or choose a group that can
49123 + view all processes and files normally restricted to root if you choose
49124 + the "restrict to user only" option. NOTE: If you're running identd or
49125 + ntpd as a non-root user, you will have to run it as the group you
49126 + specify here.
49127 +
49128 +config GRKERNSEC_PROC_USER
49129 + bool "Restrict /proc to user only"
49130 + depends on GRKERNSEC_PROC
49131 + help
49132 + If you say Y here, non-root users will only be able to view their own
49133 + processes, and restricts them from viewing network-related information,
49134 + and viewing kernel symbol and module information.
49135 +
49136 +config GRKERNSEC_PROC_USERGROUP
49137 + bool "Allow special group"
49138 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49139 + help
49140 + If you say Y here, you will be able to select a group that will be
49141 + able to view all processes and network-related information. If you've
49142 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49143 + remain hidden. This option is useful if you want to run identd as
49144 + a non-root user.
49145 +
49146 +config GRKERNSEC_PROC_GID
49147 + int "GID for special group"
49148 + depends on GRKERNSEC_PROC_USERGROUP
49149 + default 1001
49150 +
49151 +config GRKERNSEC_PROC_ADD
49152 + bool "Additional restrictions"
49153 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49154 + help
49155 + If you say Y here, additional restrictions will be placed on
49156 + /proc that keep normal users from viewing device information and
49157 + slabinfo information that could be useful for exploits.
49158 +
49159 +config GRKERNSEC_LINK
49160 + bool "Linking restrictions"
49161 + help
49162 + If you say Y here, /tmp race exploits will be prevented, since users
49163 + will no longer be able to follow symlinks owned by other users in
49164 + world-writable +t directories (e.g. /tmp), unless the owner of the
49165 + symlink is the owner of the directory. users will also not be
49166 + able to hardlink to files they do not own. If the sysctl option is
49167 + enabled, a sysctl option with name "linking_restrictions" is created.
49168 +
49169 +config GRKERNSEC_FIFO
49170 + bool "FIFO restrictions"
49171 + help
49172 + If you say Y here, users will not be able to write to FIFOs they don't
49173 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49174 + the FIFO is the same owner of the directory it's held in. If the sysctl
49175 + option is enabled, a sysctl option with name "fifo_restrictions" is
49176 + created.
49177 +
49178 +config GRKERNSEC_SYSFS_RESTRICT
49179 + bool "Sysfs/debugfs restriction"
49180 + depends on SYSFS
49181 + help
49182 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49183 + any filesystem normally mounted under it (e.g. debugfs) will be
49184 + mostly accessible only by root. These filesystems generally provide access
49185 + to hardware and debug information that isn't appropriate for unprivileged
49186 + users of the system. Sysfs and debugfs have also become a large source
49187 + of new vulnerabilities, ranging from infoleaks to local compromise.
49188 + There has been very little oversight with an eye toward security involved
49189 + in adding new exporters of information to these filesystems, so their
49190 + use is discouraged.
49191 + For reasons of compatibility, a few directories have been whitelisted
49192 + for access by non-root users:
49193 + /sys/fs/selinux
49194 + /sys/fs/fuse
49195 + /sys/devices/system/cpu
49196 +
49197 +config GRKERNSEC_ROFS
49198 + bool "Runtime read-only mount protection"
49199 + help
49200 + If you say Y here, a sysctl option with name "romount_protect" will
49201 + be created. By setting this option to 1 at runtime, filesystems
49202 + will be protected in the following ways:
49203 + * No new writable mounts will be allowed
49204 + * Existing read-only mounts won't be able to be remounted read/write
49205 + * Write operations will be denied on all block devices
49206 + This option acts independently of grsec_lock: once it is set to 1,
49207 + it cannot be turned off. Therefore, please be mindful of the resulting
49208 + behavior if this option is enabled in an init script on a read-only
49209 + filesystem. This feature is mainly intended for secure embedded systems.
49210 +
49211 +config GRKERNSEC_CHROOT
49212 + bool "Chroot jail restrictions"
49213 + help
49214 + If you say Y here, you will be able to choose several options that will
49215 + make breaking out of a chrooted jail much more difficult. If you
49216 + encounter no software incompatibilities with the following options, it
49217 + is recommended that you enable each one.
49218 +
49219 +config GRKERNSEC_CHROOT_MOUNT
49220 + bool "Deny mounts"
49221 + depends on GRKERNSEC_CHROOT
49222 + help
49223 + If you say Y here, processes inside a chroot will not be able to
49224 + mount or remount filesystems. If the sysctl option is enabled, a
49225 + sysctl option with name "chroot_deny_mount" is created.
49226 +
49227 +config GRKERNSEC_CHROOT_DOUBLE
49228 + bool "Deny double-chroots"
49229 + depends on GRKERNSEC_CHROOT
49230 + help
49231 + If you say Y here, processes inside a chroot will not be able to chroot
49232 + again outside the chroot. This is a widely used method of breaking
49233 + out of a chroot jail and should not be allowed. If the sysctl
49234 + option is enabled, a sysctl option with name
49235 + "chroot_deny_chroot" is created.
49236 +
49237 +config GRKERNSEC_CHROOT_PIVOT
49238 + bool "Deny pivot_root in chroot"
49239 + depends on GRKERNSEC_CHROOT
49240 + help
49241 + If you say Y here, processes inside a chroot will not be able to use
49242 + a function called pivot_root() that was introduced in Linux 2.3.41. It
49243 + works similar to chroot in that it changes the root filesystem. This
49244 + function could be misused in a chrooted process to attempt to break out
49245 + of the chroot, and therefore should not be allowed. If the sysctl
49246 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
49247 + created.
49248 +
49249 +config GRKERNSEC_CHROOT_CHDIR
49250 + bool "Enforce chdir(\"/\") on all chroots"
49251 + depends on GRKERNSEC_CHROOT
49252 + help
49253 + If you say Y here, the current working directory of all newly-chrooted
49254 + applications will be set to the root directory of the chroot.
49255 + The man page on chroot(2) states:
49256 + Note that this call does not change the current working
49257 + directory, so that `.' can be outside the tree rooted at
49258 + `/'. In particular, the super-user can escape from a
49259 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49260 +
49261 + It is recommended that you say Y here, since it's not known to break
49262 + any software. If the sysctl option is enabled, a sysctl option with
49263 + name "chroot_enforce_chdir" is created.
49264 +
49265 +config GRKERNSEC_CHROOT_CHMOD
49266 + bool "Deny (f)chmod +s"
49267 + depends on GRKERNSEC_CHROOT
49268 + help
49269 + If you say Y here, processes inside a chroot will not be able to chmod
49270 + or fchmod files to make them have suid or sgid bits. This protects
49271 + against another published method of breaking a chroot. If the sysctl
49272 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49273 + created.
49274 +
49275 +config GRKERNSEC_CHROOT_FCHDIR
49276 + bool "Deny fchdir out of chroot"
49277 + depends on GRKERNSEC_CHROOT
49278 + help
49279 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49280 + to a file descriptor of the chrooting process that points to a directory
49281 + outside the filesystem will be stopped. If the sysctl option
49282 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49283 +
49284 +config GRKERNSEC_CHROOT_MKNOD
49285 + bool "Deny mknod"
49286 + depends on GRKERNSEC_CHROOT
49287 + help
49288 + If you say Y here, processes inside a chroot will not be allowed to
49289 + mknod. The problem with using mknod inside a chroot is that it
49290 + would allow an attacker to create a device entry that is the same
49291 + as one on the physical root of your system, which could range from
49292 + anything from the console device to a device for your harddrive (which
49293 + they could then use to wipe the drive or steal data). It is recommended
49294 + that you say Y here, unless you run into software incompatibilities.
49295 + If the sysctl option is enabled, a sysctl option with name
49296 + "chroot_deny_mknod" is created.
49297 +
49298 +config GRKERNSEC_CHROOT_SHMAT
49299 + bool "Deny shmat() out of chroot"
49300 + depends on GRKERNSEC_CHROOT
49301 + help
49302 + If you say Y here, processes inside a chroot will not be able to attach
49303 + to shared memory segments that were created outside of the chroot jail.
49304 + It is recommended that you say Y here. If the sysctl option is enabled,
49305 + a sysctl option with name "chroot_deny_shmat" is created.
49306 +
49307 +config GRKERNSEC_CHROOT_UNIX
49308 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49309 + depends on GRKERNSEC_CHROOT
49310 + help
49311 + If you say Y here, processes inside a chroot will not be able to
49312 + connect to abstract (meaning not belonging to a filesystem) Unix
49313 + domain sockets that were bound outside of a chroot. It is recommended
49314 + that you say Y here. If the sysctl option is enabled, a sysctl option
49315 + with name "chroot_deny_unix" is created.
49316 +
49317 +config GRKERNSEC_CHROOT_FINDTASK
49318 + bool "Protect outside processes"
49319 + depends on GRKERNSEC_CHROOT
49320 + help
49321 + If you say Y here, processes inside a chroot will not be able to
49322 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49323 + getsid, or view any process outside of the chroot. If the sysctl
49324 + option is enabled, a sysctl option with name "chroot_findtask" is
49325 + created.
49326 +
49327 +config GRKERNSEC_CHROOT_NICE
49328 + bool "Restrict priority changes"
49329 + depends on GRKERNSEC_CHROOT
49330 + help
49331 + If you say Y here, processes inside a chroot will not be able to raise
49332 + the priority of processes in the chroot, or alter the priority of
49333 + processes outside the chroot. This provides more security than simply
49334 + removing CAP_SYS_NICE from the process' capability set. If the
49335 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49336 + is created.
49337 +
49338 +config GRKERNSEC_CHROOT_SYSCTL
49339 + bool "Deny sysctl writes"
49340 + depends on GRKERNSEC_CHROOT
49341 + help
49342 + If you say Y here, an attacker in a chroot will not be able to
49343 + write to sysctl entries, either by sysctl(2) or through a /proc
49344 + interface. It is strongly recommended that you say Y here. If the
49345 + sysctl option is enabled, a sysctl option with name
49346 + "chroot_deny_sysctl" is created.
49347 +
49348 +config GRKERNSEC_CHROOT_CAPS
49349 + bool "Capability restrictions"
49350 + depends on GRKERNSEC_CHROOT
49351 + help
49352 + If you say Y here, the capabilities on all processes within a
49353 + chroot jail will be lowered to stop module insertion, raw i/o,
49354 + system and net admin tasks, rebooting the system, modifying immutable
49355 + files, modifying IPC owned by another, and changing the system time.
49356 + This is left an option because it can break some apps. Disable this
49357 + if your chrooted apps are having problems performing those kinds of
49358 + tasks. If the sysctl option is enabled, a sysctl option with
49359 + name "chroot_caps" is created.
49360 +
49361 +endmenu
49362 +menu "Kernel Auditing"
49363 +depends on GRKERNSEC
49364 +
49365 +config GRKERNSEC_AUDIT_GROUP
49366 + bool "Single group for auditing"
49367 + help
49368 + If you say Y here, the exec, chdir, and (un)mount logging features
49369 + will only operate on a group you specify. This option is recommended
49370 + if you only want to watch certain users instead of having a large
49371 + amount of logs from the entire system. If the sysctl option is enabled,
49372 + a sysctl option with name "audit_group" is created.
49373 +
49374 +config GRKERNSEC_AUDIT_GID
49375 + int "GID for auditing"
49376 + depends on GRKERNSEC_AUDIT_GROUP
49377 + default 1007
49378 +
49379 +config GRKERNSEC_EXECLOG
49380 + bool "Exec logging"
49381 + help
49382 + If you say Y here, all execve() calls will be logged (since the
49383 + other exec*() calls are frontends to execve(), all execution
49384 + will be logged). Useful for shell-servers that like to keep track
49385 + of their users. If the sysctl option is enabled, a sysctl option with
49386 + name "exec_logging" is created.
49387 + WARNING: This option when enabled will produce a LOT of logs, especially
49388 + on an active system.
49389 +
49390 +config GRKERNSEC_RESLOG
49391 + bool "Resource logging"
49392 + help
49393 + If you say Y here, all attempts to overstep resource limits will
49394 + be logged with the resource name, the requested size, and the current
49395 + limit. It is highly recommended that you say Y here. If the sysctl
49396 + option is enabled, a sysctl option with name "resource_logging" is
49397 + created. If the RBAC system is enabled, the sysctl value is ignored.
49398 +
49399 +config GRKERNSEC_CHROOT_EXECLOG
49400 + bool "Log execs within chroot"
49401 + help
49402 + If you say Y here, all executions inside a chroot jail will be logged
49403 + to syslog. This can cause a large amount of logs if certain
49404 + applications (eg. djb's daemontools) are installed on the system, and
49405 + is therefore left as an option. If the sysctl option is enabled, a
49406 + sysctl option with name "chroot_execlog" is created.
49407 +
49408 +config GRKERNSEC_AUDIT_PTRACE
49409 + bool "Ptrace logging"
49410 + help
49411 + If you say Y here, all attempts to attach to a process via ptrace
49412 + will be logged. If the sysctl option is enabled, a sysctl option
49413 + with name "audit_ptrace" is created.
49414 +
49415 +config GRKERNSEC_AUDIT_CHDIR
49416 + bool "Chdir logging"
49417 + help
49418 + If you say Y here, all chdir() calls will be logged. If the sysctl
49419 + option is enabled, a sysctl option with name "audit_chdir" is created.
49420 +
49421 +config GRKERNSEC_AUDIT_MOUNT
49422 + bool "(Un)Mount logging"
49423 + help
49424 + If you say Y here, all mounts and unmounts will be logged. If the
49425 + sysctl option is enabled, a sysctl option with name "audit_mount" is
49426 + created.
49427 +
49428 +config GRKERNSEC_SIGNAL
49429 + bool "Signal logging"
49430 + help
49431 + If you say Y here, certain important signals will be logged, such as
49432 + SIGSEGV, which will as a result inform you of when an error in a program
49433 + occurred, which in some cases could mean a possible exploit attempt.
49434 + If the sysctl option is enabled, a sysctl option with name
49435 + "signal_logging" is created.
49436 +
49437 +config GRKERNSEC_FORKFAIL
49438 + bool "Fork failure logging"
49439 + help
49440 + If you say Y here, all failed fork() attempts will be logged.
49441 + This could suggest a fork bomb, or someone attempting to overstep
49442 + their process limit. If the sysctl option is enabled, a sysctl option
49443 + with name "forkfail_logging" is created.
49444 +
49445 +config GRKERNSEC_TIME
49446 + bool "Time change logging"
49447 + help
49448 + If you say Y here, any changes of the system clock will be logged.
49449 + If the sysctl option is enabled, a sysctl option with name
49450 + "timechange_logging" is created.
49451 +
49452 +config GRKERNSEC_PROC_IPADDR
49453 + bool "/proc/<pid>/ipaddr support"
49454 + help
49455 + If you say Y here, a new entry will be added to each /proc/<pid>
49456 + directory that contains the IP address of the person using the task.
49457 + The IP is carried across local TCP and AF_UNIX stream sockets.
49458 + This information can be useful for IDS/IPSes to perform remote response
49459 + to a local attack. The entry is readable by only the owner of the
49460 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49461 + the RBAC system), and thus does not create privacy concerns.
49462 +
49463 +config GRKERNSEC_RWXMAP_LOG
49464 + bool 'Denied RWX mmap/mprotect logging'
49465 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49466 + help
49467 + If you say Y here, calls to mmap() and mprotect() with explicit
49468 + usage of PROT_WRITE and PROT_EXEC together will be logged when
49469 + denied by the PAX_MPROTECT feature. If the sysctl option is
49470 + enabled, a sysctl option with name "rwxmap_logging" is created.
49471 +
49472 +config GRKERNSEC_AUDIT_TEXTREL
49473 + bool 'ELF text relocations logging (READ HELP)'
49474 + depends on PAX_MPROTECT
49475 + help
49476 + If you say Y here, text relocations will be logged with the filename
49477 + of the offending library or binary. The purpose of the feature is
49478 + to help Linux distribution developers get rid of libraries and
49479 + binaries that need text relocations which hinder the future progress
49480 + of PaX. Only Linux distribution developers should say Y here, and
49481 + never on a production machine, as this option creates an information
49482 + leak that could aid an attacker in defeating the randomization of
49483 + a single memory region. If the sysctl option is enabled, a sysctl
49484 + option with name "audit_textrel" is created.
49485 +
49486 +endmenu
49487 +
49488 +menu "Executable Protections"
49489 +depends on GRKERNSEC
49490 +
49491 +config GRKERNSEC_DMESG
49492 + bool "Dmesg(8) restriction"
49493 + help
49494 + If you say Y here, non-root users will not be able to use dmesg(8)
49495 + to view up to the last 4kb of messages in the kernel's log buffer.
49496 + The kernel's log buffer often contains kernel addresses and other
49497 + identifying information useful to an attacker in fingerprinting a
49498 + system for a targeted exploit.
49499 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
49500 + created.
49501 +
49502 +config GRKERNSEC_HARDEN_PTRACE
49503 + bool "Deter ptrace-based process snooping"
49504 + help
49505 + If you say Y here, TTY sniffers and other malicious monitoring
49506 + programs implemented through ptrace will be defeated. If you
49507 + have been using the RBAC system, this option has already been
49508 + enabled for several years for all users, with the ability to make
49509 + fine-grained exceptions.
49510 +
49511 + This option only affects the ability of non-root users to ptrace
49512 + processes that are not a descendent of the ptracing process.
49513 + This means that strace ./binary and gdb ./binary will still work,
49514 + but attaching to arbitrary processes will not. If the sysctl
49515 + option is enabled, a sysctl option with name "harden_ptrace" is
49516 + created.
49517 +
49518 +config GRKERNSEC_PTRACE_READEXEC
49519 + bool "Require read access to ptrace sensitive binaries"
49520 + help
49521 + If you say Y here, unprivileged users will not be able to ptrace unreadable
49522 + binaries. This option is useful in environments that
49523 + remove the read bits (e.g. file mode 4711) from suid binaries to
49524 + prevent infoleaking of their contents. This option adds
49525 + consistency to the use of that file mode, as the binary could normally
49526 + be read out when run without privileges while ptracing.
49527 +
49528 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49529 + is created.
49530 +
49531 +config GRKERNSEC_SETXID
49532 + bool "Enforce consistent multithreaded privileges"
49533 + help
49534 + If you say Y here, a change from a root uid to a non-root uid
49535 + in a multithreaded application will cause the resulting uids,
49536 + gids, supplementary groups, and capabilities in that thread
49537 + to be propagated to the other threads of the process. In most
49538 + cases this is unnecessary, as glibc will emulate this behavior
49539 + on behalf of the application. Other libcs do not act in the
49540 + same way, allowing the other threads of the process to continue
49541 + running with root privileges. If the sysctl option is enabled,
49542 + a sysctl option with name "consistent_setxid" is created.
49543 +
49544 +config GRKERNSEC_TPE
49545 + bool "Trusted Path Execution (TPE)"
49546 + help
49547 + If you say Y here, you will be able to choose a gid to add to the
49548 + supplementary groups of users you want to mark as "untrusted."
49549 + These users will not be able to execute any files that are not in
49550 + root-owned directories writable only by root. If the sysctl option
49551 + is enabled, a sysctl option with name "tpe" is created.
49552 +
49553 +config GRKERNSEC_TPE_ALL
49554 + bool "Partially restrict all non-root users"
49555 + depends on GRKERNSEC_TPE
49556 + help
49557 + If you say Y here, all non-root users will be covered under
49558 + a weaker TPE restriction. This is separate from, and in addition to,
49559 + the main TPE options that you have selected elsewhere. Thus, if a
49560 + "trusted" GID is chosen, this restriction applies to even that GID.
49561 + Under this restriction, all non-root users will only be allowed to
49562 + execute files in directories they own that are not group or
49563 + world-writable, or in directories owned by root and writable only by
49564 + root. If the sysctl option is enabled, a sysctl option with name
49565 + "tpe_restrict_all" is created.
49566 +
49567 +config GRKERNSEC_TPE_INVERT
49568 + bool "Invert GID option"
49569 + depends on GRKERNSEC_TPE
49570 + help
49571 + If you say Y here, the group you specify in the TPE configuration will
49572 + decide what group TPE restrictions will be *disabled* for. This
49573 + option is useful if you want TPE restrictions to be applied to most
49574 + users on the system. If the sysctl option is enabled, a sysctl option
49575 + with name "tpe_invert" is created. Unlike other sysctl options, this
49576 + entry will default to on for backward-compatibility.
49577 +
49578 +config GRKERNSEC_TPE_GID
49579 + int "GID for untrusted users"
49580 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49581 + default 1005
49582 + help
49583 + Setting this GID determines what group TPE restrictions will be
49584 + *enabled* for. If the sysctl option is enabled, a sysctl option
49585 + with name "tpe_gid" is created.
49586 +
49587 +config GRKERNSEC_TPE_GID
49588 + int "GID for trusted users"
49589 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49590 + default 1005
49591 + help
49592 + Setting this GID determines what group TPE restrictions will be
49593 + *disabled* for. If the sysctl option is enabled, a sysctl option
49594 + with name "tpe_gid" is created.
49595 +
49596 +endmenu
49597 +menu "Network Protections"
49598 +depends on GRKERNSEC
49599 +
49600 +config GRKERNSEC_RANDNET
49601 + bool "Larger entropy pools"
49602 + help
49603 + If you say Y here, the entropy pools used for many features of Linux
49604 + and grsecurity will be doubled in size. Since several grsecurity
49605 + features use additional randomness, it is recommended that you say Y
49606 + here. Saying Y here has a similar effect as modifying
49607 + /proc/sys/kernel/random/poolsize.
49608 +
49609 +config GRKERNSEC_BLACKHOLE
49610 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49611 + depends on NET
49612 + help
49613 + If you say Y here, neither TCP resets nor ICMP
49614 + destination-unreachable packets will be sent in response to packets
49615 + sent to ports for which no associated listening process exists.
49616 + This feature supports both IPV4 and IPV6 and exempts the
49617 + loopback interface from blackholing. Enabling this feature
49618 + makes a host more resilient to DoS attacks and reduces network
49619 + visibility against scanners.
49620 +
49621 + The blackhole feature as-implemented is equivalent to the FreeBSD
49622 + blackhole feature, as it prevents RST responses to all packets, not
49623 + just SYNs. Under most application behavior this causes no
49624 + problems, but applications (like haproxy) may not close certain
49625 + connections in a way that cleanly terminates them on the remote
49626 + end, leaving the remote host in LAST_ACK state. Because of this
49627 + side-effect and to prevent intentional LAST_ACK DoSes, this
49628 + feature also adds automatic mitigation against such attacks.
49629 + The mitigation drastically reduces the amount of time a socket
49630 + can spend in LAST_ACK state. If you're using haproxy and not
49631 + all servers it connects to have this option enabled, consider
49632 + disabling this feature on the haproxy host.
49633 +
49634 + If the sysctl option is enabled, two sysctl options with names
49635 + "ip_blackhole" and "lastack_retries" will be created.
49636 + While "ip_blackhole" takes the standard zero/non-zero on/off
49637 + toggle, "lastack_retries" uses the same kinds of values as
49638 + "tcp_retries1" and "tcp_retries2". The default value of 4
49639 + prevents a socket from lasting more than 45 seconds in LAST_ACK
49640 + state.
49641 +
49642 +config GRKERNSEC_SOCKET
49643 + bool "Socket restrictions"
49644 + depends on NET
49645 + help
49646 + If you say Y here, you will be able to choose from several options.
49647 + If you assign a GID on your system and add it to the supplementary
49648 + groups of users you want to restrict socket access to, this patch
49649 + will perform up to three things, based on the option(s) you choose.
49650 +
49651 +config GRKERNSEC_SOCKET_ALL
49652 + bool "Deny any sockets to group"
49653 + depends on GRKERNSEC_SOCKET
49654 + help
49655 + If you say Y here, you will be able to choose a GID of whose users will
49656 + be unable to connect to other hosts from your machine or run server
49657 + applications from your machine. If the sysctl option is enabled, a
49658 + sysctl option with name "socket_all" is created.
49659 +
49660 +config GRKERNSEC_SOCKET_ALL_GID
49661 + int "GID to deny all sockets for"
49662 + depends on GRKERNSEC_SOCKET_ALL
49663 + default 1004
49664 + help
49665 + Here you can choose the GID to disable socket access for. Remember to
49666 + add the users you want socket access disabled for to the GID
49667 + specified here. If the sysctl option is enabled, a sysctl option
49668 + with name "socket_all_gid" is created.
49669 +
49670 +config GRKERNSEC_SOCKET_CLIENT
49671 + bool "Deny client sockets to group"
49672 + depends on GRKERNSEC_SOCKET
49673 + help
49674 + If you say Y here, you will be able to choose a GID of whose users will
49675 + be unable to connect to other hosts from your machine, but will be
49676 + able to run servers. If this option is enabled, all users in the group
49677 + you specify will have to use passive mode when initiating ftp transfers
49678 + from the shell on your machine. If the sysctl option is enabled, a
49679 + sysctl option with name "socket_client" is created.
49680 +
49681 +config GRKERNSEC_SOCKET_CLIENT_GID
49682 + int "GID to deny client sockets for"
49683 + depends on GRKERNSEC_SOCKET_CLIENT
49684 + default 1003
49685 + help
49686 + Here you can choose the GID to disable client socket access for.
49687 + Remember to add the users you want client socket access disabled for to
49688 + the GID specified here. If the sysctl option is enabled, a sysctl
49689 + option with name "socket_client_gid" is created.
49690 +
49691 +config GRKERNSEC_SOCKET_SERVER
49692 + bool "Deny server sockets to group"
49693 + depends on GRKERNSEC_SOCKET
49694 + help
49695 + If you say Y here, you will be able to choose a GID of whose users will
49696 + be unable to run server applications from your machine. If the sysctl
49697 + option is enabled, a sysctl option with name "socket_server" is created.
49698 +
49699 +config GRKERNSEC_SOCKET_SERVER_GID
49700 + int "GID to deny server sockets for"
49701 + depends on GRKERNSEC_SOCKET_SERVER
49702 + default 1002
49703 + help
49704 + Here you can choose the GID to disable server socket access for.
49705 + Remember to add the users you want server socket access disabled for to
49706 + the GID specified here. If the sysctl option is enabled, a sysctl
49707 + option with name "socket_server_gid" is created.
49708 +
49709 +endmenu
49710 +menu "Sysctl support"
49711 +depends on GRKERNSEC && SYSCTL
49712 +
49713 +config GRKERNSEC_SYSCTL
49714 + bool "Sysctl support"
49715 + help
49716 + If you say Y here, you will be able to change the options that
49717 + grsecurity runs with at bootup, without having to recompile your
49718 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
49719 + to enable (1) or disable (0) various features. All the sysctl entries
49720 + are mutable until the "grsec_lock" entry is set to a non-zero value.
49721 + All features enabled in the kernel configuration are disabled at boot
49722 + if you do not say Y to the "Turn on features by default" option.
49723 + All options should be set at startup, and the grsec_lock entry should
49724 + be set to a non-zero value after all the options are set.
49725 + *THIS IS EXTREMELY IMPORTANT*
49726 +
49727 +config GRKERNSEC_SYSCTL_DISTRO
49728 + bool "Extra sysctl support for distro makers (READ HELP)"
49729 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
49730 + help
49731 + If you say Y here, additional sysctl options will be created
49732 + for features that affect processes running as root. Therefore,
49733 + it is critical when using this option that the grsec_lock entry be
49734 + enabled after boot. Only distros with prebuilt kernel packages
49735 + with this option enabled that can ensure grsec_lock is enabled
49736 + after boot should use this option.
49737 + *Failure to set grsec_lock after boot makes all grsec features
49738 + this option covers useless*
49739 +
49740 + Currently this option creates the following sysctl entries:
49741 + "Disable Privileged I/O": "disable_priv_io"
49742 +
49743 +config GRKERNSEC_SYSCTL_ON
49744 + bool "Turn on features by default"
49745 + depends on GRKERNSEC_SYSCTL
49746 + help
49747 + If you say Y here, instead of having all features enabled in the
49748 + kernel configuration disabled at boot time, the features will be
49749 + enabled at boot time. It is recommended you say Y here unless
49750 + there is some reason you would want all sysctl-tunable features to
49751 + be disabled by default. As mentioned elsewhere, it is important
49752 + to enable the grsec_lock entry once you have finished modifying
49753 + the sysctl entries.
49754 +
49755 +endmenu
49756 +menu "Logging Options"
49757 +depends on GRKERNSEC
49758 +
49759 +config GRKERNSEC_FLOODTIME
49760 + int "Seconds in between log messages (minimum)"
49761 + default 10
49762 + help
49763 + This option allows you to enforce the number of seconds between
49764 + grsecurity log messages. The default should be suitable for most
49765 + people, however, if you choose to change it, choose a value small enough
49766 + to allow informative logs to be produced, but large enough to
49767 + prevent flooding.
49768 +
49769 +config GRKERNSEC_FLOODBURST
49770 + int "Number of messages in a burst (maximum)"
49771 + default 6
49772 + help
49773 + This option allows you to choose the maximum number of messages allowed
49774 + within the flood time interval you chose in a separate option. The
49775 + default should be suitable for most people, however if you find that
49776 + many of your logs are being interpreted as flooding, you may want to
49777 + raise this value.
49778 +
49779 +endmenu
49780 +
49781 +endmenu
49782 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
49783 new file mode 100644
49784 index 0000000..1b9afa9
49785 --- /dev/null
49786 +++ b/grsecurity/Makefile
49787 @@ -0,0 +1,38 @@
49788 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
49789 +# during 2001-2009 it has been completely redesigned by Brad Spengler
49790 +# into an RBAC system
49791 +#
49792 +# All code in this directory and various hooks inserted throughout the kernel
49793 +# are copyright Brad Spengler - Open Source Security, Inc., and released
49794 +# under the GPL v2 or higher
49795 +
49796 +KBUILD_CFLAGS += -Werror
49797 +
49798 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
49799 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
49800 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
49801 +
49802 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
49803 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
49804 + gracl_learn.o grsec_log.o
49805 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
49806 +
49807 +ifdef CONFIG_NET
49808 +obj-y += grsec_sock.o
49809 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
49810 +endif
49811 +
49812 +ifndef CONFIG_GRKERNSEC
49813 +obj-y += grsec_disabled.o
49814 +endif
49815 +
49816 +ifdef CONFIG_GRKERNSEC_HIDESYM
49817 +extra-y := grsec_hidesym.o
49818 +$(obj)/grsec_hidesym.o:
49819 + @-chmod -f 500 /boot
49820 + @-chmod -f 500 /lib/modules
49821 + @-chmod -f 500 /lib64/modules
49822 + @-chmod -f 500 /lib32/modules
49823 + @-chmod -f 700 .
49824 + @echo ' grsec: protected kernel image paths'
49825 +endif
49826 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
49827 new file mode 100644
49828 index 0000000..2733872
49829 --- /dev/null
49830 +++ b/grsecurity/gracl.c
49831 @@ -0,0 +1,4163 @@
49832 +#include <linux/kernel.h>
49833 +#include <linux/module.h>
49834 +#include <linux/sched.h>
49835 +#include <linux/mm.h>
49836 +#include <linux/file.h>
49837 +#include <linux/fs.h>
49838 +#include <linux/namei.h>
49839 +#include <linux/mount.h>
49840 +#include <linux/tty.h>
49841 +#include <linux/proc_fs.h>
49842 +#include <linux/lglock.h>
49843 +#include <linux/slab.h>
49844 +#include <linux/vmalloc.h>
49845 +#include <linux/types.h>
49846 +#include <linux/sysctl.h>
49847 +#include <linux/netdevice.h>
49848 +#include <linux/ptrace.h>
49849 +#include <linux/gracl.h>
49850 +#include <linux/gralloc.h>
49851 +#include <linux/security.h>
49852 +#include <linux/grinternal.h>
49853 +#include <linux/pid_namespace.h>
49854 +#include <linux/fdtable.h>
49855 +#include <linux/percpu.h>
49856 +
49857 +#include <asm/uaccess.h>
49858 +#include <asm/errno.h>
49859 +#include <asm/mman.h>
49860 +
49861 +static struct acl_role_db acl_role_set;
49862 +static struct name_db name_set;
49863 +static struct inodev_db inodev_set;
49864 +
49865 +/* for keeping track of userspace pointers used for subjects, so we
49866 + can share references in the kernel as well
49867 +*/
49868 +
49869 +static struct path real_root;
49870 +
49871 +static struct acl_subj_map_db subj_map_set;
49872 +
49873 +static struct acl_role_label *default_role;
49874 +
49875 +static struct acl_role_label *role_list;
49876 +
49877 +static u16 acl_sp_role_value;
49878 +
49879 +extern char *gr_shared_page[4];
49880 +static DEFINE_MUTEX(gr_dev_mutex);
49881 +DEFINE_RWLOCK(gr_inode_lock);
49882 +
49883 +struct gr_arg *gr_usermode;
49884 +
49885 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
49886 +
49887 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
49888 +extern void gr_clear_learn_entries(void);
49889 +
49890 +#ifdef CONFIG_GRKERNSEC_RESLOG
49891 +extern void gr_log_resource(const struct task_struct *task,
49892 + const int res, const unsigned long wanted, const int gt);
49893 +#endif
49894 +
49895 +unsigned char *gr_system_salt;
49896 +unsigned char *gr_system_sum;
49897 +
49898 +static struct sprole_pw **acl_special_roles = NULL;
49899 +static __u16 num_sprole_pws = 0;
49900 +
49901 +static struct acl_role_label *kernel_role = NULL;
49902 +
49903 +static unsigned int gr_auth_attempts = 0;
49904 +static unsigned long gr_auth_expires = 0UL;
49905 +
49906 +#ifdef CONFIG_NET
49907 +extern struct vfsmount *sock_mnt;
49908 +#endif
49909 +
49910 +extern struct vfsmount *pipe_mnt;
49911 +extern struct vfsmount *shm_mnt;
49912 +#ifdef CONFIG_HUGETLBFS
49913 +extern struct vfsmount *hugetlbfs_vfsmount;
49914 +#endif
49915 +
49916 +static struct acl_object_label *fakefs_obj_rw;
49917 +static struct acl_object_label *fakefs_obj_rwx;
49918 +
49919 +extern int gr_init_uidset(void);
49920 +extern void gr_free_uidset(void);
49921 +extern void gr_remove_uid(uid_t uid);
49922 +extern int gr_find_uid(uid_t uid);
49923 +
49924 +DECLARE_BRLOCK(vfsmount_lock);
49925 +
49926 +__inline__ int
49927 +gr_acl_is_enabled(void)
49928 +{
49929 + return (gr_status & GR_READY);
49930 +}
49931 +
49932 +#ifdef CONFIG_BTRFS_FS
49933 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
49934 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
49935 +#endif
49936 +
49937 +static inline dev_t __get_dev(const struct dentry *dentry)
49938 +{
49939 +#ifdef CONFIG_BTRFS_FS
49940 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
49941 + return get_btrfs_dev_from_inode(dentry->d_inode);
49942 + else
49943 +#endif
49944 + return dentry->d_inode->i_sb->s_dev;
49945 +}
49946 +
49947 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
49948 +{
49949 + return __get_dev(dentry);
49950 +}
49951 +
49952 +static char gr_task_roletype_to_char(struct task_struct *task)
49953 +{
49954 + switch (task->role->roletype &
49955 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
49956 + GR_ROLE_SPECIAL)) {
49957 + case GR_ROLE_DEFAULT:
49958 + return 'D';
49959 + case GR_ROLE_USER:
49960 + return 'U';
49961 + case GR_ROLE_GROUP:
49962 + return 'G';
49963 + case GR_ROLE_SPECIAL:
49964 + return 'S';
49965 + }
49966 +
49967 + return 'X';
49968 +}
49969 +
49970 +char gr_roletype_to_char(void)
49971 +{
49972 + return gr_task_roletype_to_char(current);
49973 +}
49974 +
49975 +__inline__ int
49976 +gr_acl_tpe_check(void)
49977 +{
49978 + if (unlikely(!(gr_status & GR_READY)))
49979 + return 0;
49980 + if (current->role->roletype & GR_ROLE_TPE)
49981 + return 1;
49982 + else
49983 + return 0;
49984 +}
49985 +
49986 +int
49987 +gr_handle_rawio(const struct inode *inode)
49988 +{
49989 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49990 + if (inode && S_ISBLK(inode->i_mode) &&
49991 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49992 + !capable(CAP_SYS_RAWIO))
49993 + return 1;
49994 +#endif
49995 + return 0;
49996 +}
49997 +
49998 +static int
49999 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50000 +{
50001 + if (likely(lena != lenb))
50002 + return 0;
50003 +
50004 + return !memcmp(a, b, lena);
50005 +}
50006 +
50007 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50008 +{
50009 + *buflen -= namelen;
50010 + if (*buflen < 0)
50011 + return -ENAMETOOLONG;
50012 + *buffer -= namelen;
50013 + memcpy(*buffer, str, namelen);
50014 + return 0;
50015 +}
50016 +
50017 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50018 +{
50019 + return prepend(buffer, buflen, name->name, name->len);
50020 +}
50021 +
50022 +static int prepend_path(const struct path *path, struct path *root,
50023 + char **buffer, int *buflen)
50024 +{
50025 + struct dentry *dentry = path->dentry;
50026 + struct vfsmount *vfsmnt = path->mnt;
50027 + bool slash = false;
50028 + int error = 0;
50029 +
50030 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50031 + struct dentry * parent;
50032 +
50033 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50034 + /* Global root? */
50035 + if (vfsmnt->mnt_parent == vfsmnt) {
50036 + goto out;
50037 + }
50038 + dentry = vfsmnt->mnt_mountpoint;
50039 + vfsmnt = vfsmnt->mnt_parent;
50040 + continue;
50041 + }
50042 + parent = dentry->d_parent;
50043 + prefetch(parent);
50044 + spin_lock(&dentry->d_lock);
50045 + error = prepend_name(buffer, buflen, &dentry->d_name);
50046 + spin_unlock(&dentry->d_lock);
50047 + if (!error)
50048 + error = prepend(buffer, buflen, "/", 1);
50049 + if (error)
50050 + break;
50051 +
50052 + slash = true;
50053 + dentry = parent;
50054 + }
50055 +
50056 +out:
50057 + if (!error && !slash)
50058 + error = prepend(buffer, buflen, "/", 1);
50059 +
50060 + return error;
50061 +}
50062 +
50063 +/* this must be called with vfsmount_lock and rename_lock held */
50064 +
50065 +static char *__our_d_path(const struct path *path, struct path *root,
50066 + char *buf, int buflen)
50067 +{
50068 + char *res = buf + buflen;
50069 + int error;
50070 +
50071 + prepend(&res, &buflen, "\0", 1);
50072 + error = prepend_path(path, root, &res, &buflen);
50073 + if (error)
50074 + return ERR_PTR(error);
50075 +
50076 + return res;
50077 +}
50078 +
50079 +static char *
50080 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50081 +{
50082 + char *retval;
50083 +
50084 + retval = __our_d_path(path, root, buf, buflen);
50085 + if (unlikely(IS_ERR(retval)))
50086 + retval = strcpy(buf, "<path too long>");
50087 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50088 + retval[1] = '\0';
50089 +
50090 + return retval;
50091 +}
50092 +
50093 +static char *
50094 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50095 + char *buf, int buflen)
50096 +{
50097 + struct path path;
50098 + char *res;
50099 +
50100 + path.dentry = (struct dentry *)dentry;
50101 + path.mnt = (struct vfsmount *)vfsmnt;
50102 +
50103 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50104 + by the RBAC system */
50105 + res = gen_full_path(&path, &real_root, buf, buflen);
50106 +
50107 + return res;
50108 +}
50109 +
50110 +static char *
50111 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50112 + char *buf, int buflen)
50113 +{
50114 + char *res;
50115 + struct path path;
50116 + struct path root;
50117 + struct task_struct *reaper = &init_task;
50118 +
50119 + path.dentry = (struct dentry *)dentry;
50120 + path.mnt = (struct vfsmount *)vfsmnt;
50121 +
50122 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50123 + get_fs_root(reaper->fs, &root);
50124 +
50125 + write_seqlock(&rename_lock);
50126 + br_read_lock(vfsmount_lock);
50127 + res = gen_full_path(&path, &root, buf, buflen);
50128 + br_read_unlock(vfsmount_lock);
50129 + write_sequnlock(&rename_lock);
50130 +
50131 + path_put(&root);
50132 + return res;
50133 +}
50134 +
50135 +static char *
50136 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50137 +{
50138 + char *ret;
50139 + write_seqlock(&rename_lock);
50140 + br_read_lock(vfsmount_lock);
50141 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50142 + PAGE_SIZE);
50143 + br_read_unlock(vfsmount_lock);
50144 + write_sequnlock(&rename_lock);
50145 + return ret;
50146 +}
50147 +
50148 +static char *
50149 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50150 +{
50151 + char *ret;
50152 + char *buf;
50153 + int buflen;
50154 +
50155 + write_seqlock(&rename_lock);
50156 + br_read_lock(vfsmount_lock);
50157 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50158 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50159 + buflen = (int)(ret - buf);
50160 + if (buflen >= 5)
50161 + prepend(&ret, &buflen, "/proc", 5);
50162 + else
50163 + ret = strcpy(buf, "<path too long>");
50164 + br_read_unlock(vfsmount_lock);
50165 + write_sequnlock(&rename_lock);
50166 + return ret;
50167 +}
50168 +
50169 +char *
50170 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50171 +{
50172 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50173 + PAGE_SIZE);
50174 +}
50175 +
50176 +char *
50177 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50178 +{
50179 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50180 + PAGE_SIZE);
50181 +}
50182 +
50183 +char *
50184 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50185 +{
50186 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50187 + PAGE_SIZE);
50188 +}
50189 +
50190 +char *
50191 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50192 +{
50193 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50194 + PAGE_SIZE);
50195 +}
50196 +
50197 +char *
50198 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50199 +{
50200 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50201 + PAGE_SIZE);
50202 +}
50203 +
50204 +__inline__ __u32
50205 +to_gr_audit(const __u32 reqmode)
50206 +{
50207 + /* masks off auditable permission flags, then shifts them to create
50208 + auditing flags, and adds the special case of append auditing if
50209 + we're requesting write */
50210 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50211 +}
50212 +
50213 +struct acl_subject_label *
50214 +lookup_subject_map(const struct acl_subject_label *userp)
50215 +{
50216 + unsigned int index = shash(userp, subj_map_set.s_size);
50217 + struct subject_map *match;
50218 +
50219 + match = subj_map_set.s_hash[index];
50220 +
50221 + while (match && match->user != userp)
50222 + match = match->next;
50223 +
50224 + if (match != NULL)
50225 + return match->kernel;
50226 + else
50227 + return NULL;
50228 +}
50229 +
50230 +static void
50231 +insert_subj_map_entry(struct subject_map *subjmap)
50232 +{
50233 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50234 + struct subject_map **curr;
50235 +
50236 + subjmap->prev = NULL;
50237 +
50238 + curr = &subj_map_set.s_hash[index];
50239 + if (*curr != NULL)
50240 + (*curr)->prev = subjmap;
50241 +
50242 + subjmap->next = *curr;
50243 + *curr = subjmap;
50244 +
50245 + return;
50246 +}
50247 +
50248 +static struct acl_role_label *
50249 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50250 + const gid_t gid)
50251 +{
50252 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50253 + struct acl_role_label *match;
50254 + struct role_allowed_ip *ipp;
50255 + unsigned int x;
50256 + u32 curr_ip = task->signal->curr_ip;
50257 +
50258 + task->signal->saved_ip = curr_ip;
50259 +
50260 + match = acl_role_set.r_hash[index];
50261 +
50262 + while (match) {
50263 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50264 + for (x = 0; x < match->domain_child_num; x++) {
50265 + if (match->domain_children[x] == uid)
50266 + goto found;
50267 + }
50268 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50269 + break;
50270 + match = match->next;
50271 + }
50272 +found:
50273 + if (match == NULL) {
50274 + try_group:
50275 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50276 + match = acl_role_set.r_hash[index];
50277 +
50278 + while (match) {
50279 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50280 + for (x = 0; x < match->domain_child_num; x++) {
50281 + if (match->domain_children[x] == gid)
50282 + goto found2;
50283 + }
50284 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50285 + break;
50286 + match = match->next;
50287 + }
50288 +found2:
50289 + if (match == NULL)
50290 + match = default_role;
50291 + if (match->allowed_ips == NULL)
50292 + return match;
50293 + else {
50294 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50295 + if (likely
50296 + ((ntohl(curr_ip) & ipp->netmask) ==
50297 + (ntohl(ipp->addr) & ipp->netmask)))
50298 + return match;
50299 + }
50300 + match = default_role;
50301 + }
50302 + } else if (match->allowed_ips == NULL) {
50303 + return match;
50304 + } else {
50305 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50306 + if (likely
50307 + ((ntohl(curr_ip) & ipp->netmask) ==
50308 + (ntohl(ipp->addr) & ipp->netmask)))
50309 + return match;
50310 + }
50311 + goto try_group;
50312 + }
50313 +
50314 + return match;
50315 +}
50316 +
50317 +struct acl_subject_label *
50318 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50319 + const struct acl_role_label *role)
50320 +{
50321 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50322 + struct acl_subject_label *match;
50323 +
50324 + match = role->subj_hash[index];
50325 +
50326 + while (match && (match->inode != ino || match->device != dev ||
50327 + (match->mode & GR_DELETED))) {
50328 + match = match->next;
50329 + }
50330 +
50331 + if (match && !(match->mode & GR_DELETED))
50332 + return match;
50333 + else
50334 + return NULL;
50335 +}
50336 +
50337 +struct acl_subject_label *
50338 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50339 + const struct acl_role_label *role)
50340 +{
50341 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50342 + struct acl_subject_label *match;
50343 +
50344 + match = role->subj_hash[index];
50345 +
50346 + while (match && (match->inode != ino || match->device != dev ||
50347 + !(match->mode & GR_DELETED))) {
50348 + match = match->next;
50349 + }
50350 +
50351 + if (match && (match->mode & GR_DELETED))
50352 + return match;
50353 + else
50354 + return NULL;
50355 +}
50356 +
50357 +static struct acl_object_label *
50358 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50359 + const struct acl_subject_label *subj)
50360 +{
50361 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50362 + struct acl_object_label *match;
50363 +
50364 + match = subj->obj_hash[index];
50365 +
50366 + while (match && (match->inode != ino || match->device != dev ||
50367 + (match->mode & GR_DELETED))) {
50368 + match = match->next;
50369 + }
50370 +
50371 + if (match && !(match->mode & GR_DELETED))
50372 + return match;
50373 + else
50374 + return NULL;
50375 +}
50376 +
50377 +static struct acl_object_label *
50378 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50379 + const struct acl_subject_label *subj)
50380 +{
50381 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50382 + struct acl_object_label *match;
50383 +
50384 + match = subj->obj_hash[index];
50385 +
50386 + while (match && (match->inode != ino || match->device != dev ||
50387 + !(match->mode & GR_DELETED))) {
50388 + match = match->next;
50389 + }
50390 +
50391 + if (match && (match->mode & GR_DELETED))
50392 + return match;
50393 +
50394 + match = subj->obj_hash[index];
50395 +
50396 + while (match && (match->inode != ino || match->device != dev ||
50397 + (match->mode & GR_DELETED))) {
50398 + match = match->next;
50399 + }
50400 +
50401 + if (match && !(match->mode & GR_DELETED))
50402 + return match;
50403 + else
50404 + return NULL;
50405 +}
50406 +
50407 +static struct name_entry *
50408 +lookup_name_entry(const char *name)
50409 +{
50410 + unsigned int len = strlen(name);
50411 + unsigned int key = full_name_hash(name, len);
50412 + unsigned int index = key % name_set.n_size;
50413 + struct name_entry *match;
50414 +
50415 + match = name_set.n_hash[index];
50416 +
50417 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50418 + match = match->next;
50419 +
50420 + return match;
50421 +}
50422 +
50423 +static struct name_entry *
50424 +lookup_name_entry_create(const char *name)
50425 +{
50426 + unsigned int len = strlen(name);
50427 + unsigned int key = full_name_hash(name, len);
50428 + unsigned int index = key % name_set.n_size;
50429 + struct name_entry *match;
50430 +
50431 + match = name_set.n_hash[index];
50432 +
50433 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50434 + !match->deleted))
50435 + match = match->next;
50436 +
50437 + if (match && match->deleted)
50438 + return match;
50439 +
50440 + match = name_set.n_hash[index];
50441 +
50442 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50443 + match->deleted))
50444 + match = match->next;
50445 +
50446 + if (match && !match->deleted)
50447 + return match;
50448 + else
50449 + return NULL;
50450 +}
50451 +
50452 +static struct inodev_entry *
50453 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
50454 +{
50455 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
50456 + struct inodev_entry *match;
50457 +
50458 + match = inodev_set.i_hash[index];
50459 +
50460 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50461 + match = match->next;
50462 +
50463 + return match;
50464 +}
50465 +
50466 +static void
50467 +insert_inodev_entry(struct inodev_entry *entry)
50468 +{
50469 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50470 + inodev_set.i_size);
50471 + struct inodev_entry **curr;
50472 +
50473 + entry->prev = NULL;
50474 +
50475 + curr = &inodev_set.i_hash[index];
50476 + if (*curr != NULL)
50477 + (*curr)->prev = entry;
50478 +
50479 + entry->next = *curr;
50480 + *curr = entry;
50481 +
50482 + return;
50483 +}
50484 +
50485 +static void
50486 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50487 +{
50488 + unsigned int index =
50489 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50490 + struct acl_role_label **curr;
50491 + struct acl_role_label *tmp;
50492 +
50493 + curr = &acl_role_set.r_hash[index];
50494 +
50495 + /* if role was already inserted due to domains and already has
50496 + a role in the same bucket as it attached, then we need to
50497 + combine these two buckets
50498 + */
50499 + if (role->next) {
50500 + tmp = role->next;
50501 + while (tmp->next)
50502 + tmp = tmp->next;
50503 + tmp->next = *curr;
50504 + } else
50505 + role->next = *curr;
50506 + *curr = role;
50507 +
50508 + return;
50509 +}
50510 +
50511 +static void
50512 +insert_acl_role_label(struct acl_role_label *role)
50513 +{
50514 + int i;
50515 +
50516 + if (role_list == NULL) {
50517 + role_list = role;
50518 + role->prev = NULL;
50519 + } else {
50520 + role->prev = role_list;
50521 + role_list = role;
50522 + }
50523 +
50524 + /* used for hash chains */
50525 + role->next = NULL;
50526 +
50527 + if (role->roletype & GR_ROLE_DOMAIN) {
50528 + for (i = 0; i < role->domain_child_num; i++)
50529 + __insert_acl_role_label(role, role->domain_children[i]);
50530 + } else
50531 + __insert_acl_role_label(role, role->uidgid);
50532 +}
50533 +
50534 +static int
50535 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50536 +{
50537 + struct name_entry **curr, *nentry;
50538 + struct inodev_entry *ientry;
50539 + unsigned int len = strlen(name);
50540 + unsigned int key = full_name_hash(name, len);
50541 + unsigned int index = key % name_set.n_size;
50542 +
50543 + curr = &name_set.n_hash[index];
50544 +
50545 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50546 + curr = &((*curr)->next);
50547 +
50548 + if (*curr != NULL)
50549 + return 1;
50550 +
50551 + nentry = acl_alloc(sizeof (struct name_entry));
50552 + if (nentry == NULL)
50553 + return 0;
50554 + ientry = acl_alloc(sizeof (struct inodev_entry));
50555 + if (ientry == NULL)
50556 + return 0;
50557 + ientry->nentry = nentry;
50558 +
50559 + nentry->key = key;
50560 + nentry->name = name;
50561 + nentry->inode = inode;
50562 + nentry->device = device;
50563 + nentry->len = len;
50564 + nentry->deleted = deleted;
50565 +
50566 + nentry->prev = NULL;
50567 + curr = &name_set.n_hash[index];
50568 + if (*curr != NULL)
50569 + (*curr)->prev = nentry;
50570 + nentry->next = *curr;
50571 + *curr = nentry;
50572 +
50573 + /* insert us into the table searchable by inode/dev */
50574 + insert_inodev_entry(ientry);
50575 +
50576 + return 1;
50577 +}
50578 +
50579 +static void
50580 +insert_acl_obj_label(struct acl_object_label *obj,
50581 + struct acl_subject_label *subj)
50582 +{
50583 + unsigned int index =
50584 + fhash(obj->inode, obj->device, subj->obj_hash_size);
50585 + struct acl_object_label **curr;
50586 +
50587 +
50588 + obj->prev = NULL;
50589 +
50590 + curr = &subj->obj_hash[index];
50591 + if (*curr != NULL)
50592 + (*curr)->prev = obj;
50593 +
50594 + obj->next = *curr;
50595 + *curr = obj;
50596 +
50597 + return;
50598 +}
50599 +
50600 +static void
50601 +insert_acl_subj_label(struct acl_subject_label *obj,
50602 + struct acl_role_label *role)
50603 +{
50604 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50605 + struct acl_subject_label **curr;
50606 +
50607 + obj->prev = NULL;
50608 +
50609 + curr = &role->subj_hash[index];
50610 + if (*curr != NULL)
50611 + (*curr)->prev = obj;
50612 +
50613 + obj->next = *curr;
50614 + *curr = obj;
50615 +
50616 + return;
50617 +}
50618 +
50619 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50620 +
50621 +static void *
50622 +create_table(__u32 * len, int elementsize)
50623 +{
50624 + unsigned int table_sizes[] = {
50625 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50626 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50627 + 4194301, 8388593, 16777213, 33554393, 67108859
50628 + };
50629 + void *newtable = NULL;
50630 + unsigned int pwr = 0;
50631 +
50632 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50633 + table_sizes[pwr] <= *len)
50634 + pwr++;
50635 +
50636 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50637 + return newtable;
50638 +
50639 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50640 + newtable =
50641 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50642 + else
50643 + newtable = vmalloc(table_sizes[pwr] * elementsize);
50644 +
50645 + *len = table_sizes[pwr];
50646 +
50647 + return newtable;
50648 +}
50649 +
50650 +static int
50651 +init_variables(const struct gr_arg *arg)
50652 +{
50653 + struct task_struct *reaper = &init_task;
50654 + unsigned int stacksize;
50655 +
50656 + subj_map_set.s_size = arg->role_db.num_subjects;
50657 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
50658 + name_set.n_size = arg->role_db.num_objects;
50659 + inodev_set.i_size = arg->role_db.num_objects;
50660 +
50661 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
50662 + !name_set.n_size || !inodev_set.i_size)
50663 + return 1;
50664 +
50665 + if (!gr_init_uidset())
50666 + return 1;
50667 +
50668 + /* set up the stack that holds allocation info */
50669 +
50670 + stacksize = arg->role_db.num_pointers + 5;
50671 +
50672 + if (!acl_alloc_stack_init(stacksize))
50673 + return 1;
50674 +
50675 + /* grab reference for the real root dentry and vfsmount */
50676 + get_fs_root(reaper->fs, &real_root);
50677 +
50678 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50679 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
50680 +#endif
50681 +
50682 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
50683 + if (fakefs_obj_rw == NULL)
50684 + return 1;
50685 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
50686 +
50687 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
50688 + if (fakefs_obj_rwx == NULL)
50689 + return 1;
50690 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
50691 +
50692 + subj_map_set.s_hash =
50693 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
50694 + acl_role_set.r_hash =
50695 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
50696 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
50697 + inodev_set.i_hash =
50698 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
50699 +
50700 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
50701 + !name_set.n_hash || !inodev_set.i_hash)
50702 + return 1;
50703 +
50704 + memset(subj_map_set.s_hash, 0,
50705 + sizeof(struct subject_map *) * subj_map_set.s_size);
50706 + memset(acl_role_set.r_hash, 0,
50707 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
50708 + memset(name_set.n_hash, 0,
50709 + sizeof (struct name_entry *) * name_set.n_size);
50710 + memset(inodev_set.i_hash, 0,
50711 + sizeof (struct inodev_entry *) * inodev_set.i_size);
50712 +
50713 + return 0;
50714 +}
50715 +
50716 +/* free information not needed after startup
50717 + currently contains user->kernel pointer mappings for subjects
50718 +*/
50719 +
50720 +static void
50721 +free_init_variables(void)
50722 +{
50723 + __u32 i;
50724 +
50725 + if (subj_map_set.s_hash) {
50726 + for (i = 0; i < subj_map_set.s_size; i++) {
50727 + if (subj_map_set.s_hash[i]) {
50728 + kfree(subj_map_set.s_hash[i]);
50729 + subj_map_set.s_hash[i] = NULL;
50730 + }
50731 + }
50732 +
50733 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
50734 + PAGE_SIZE)
50735 + kfree(subj_map_set.s_hash);
50736 + else
50737 + vfree(subj_map_set.s_hash);
50738 + }
50739 +
50740 + return;
50741 +}
50742 +
50743 +static void
50744 +free_variables(void)
50745 +{
50746 + struct acl_subject_label *s;
50747 + struct acl_role_label *r;
50748 + struct task_struct *task, *task2;
50749 + unsigned int x;
50750 +
50751 + gr_clear_learn_entries();
50752 +
50753 + read_lock(&tasklist_lock);
50754 + do_each_thread(task2, task) {
50755 + task->acl_sp_role = 0;
50756 + task->acl_role_id = 0;
50757 + task->acl = NULL;
50758 + task->role = NULL;
50759 + } while_each_thread(task2, task);
50760 + read_unlock(&tasklist_lock);
50761 +
50762 + /* release the reference to the real root dentry and vfsmount */
50763 + path_put(&real_root);
50764 +
50765 + /* free all object hash tables */
50766 +
50767 + FOR_EACH_ROLE_START(r)
50768 + if (r->subj_hash == NULL)
50769 + goto next_role;
50770 + FOR_EACH_SUBJECT_START(r, s, x)
50771 + if (s->obj_hash == NULL)
50772 + break;
50773 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50774 + kfree(s->obj_hash);
50775 + else
50776 + vfree(s->obj_hash);
50777 + FOR_EACH_SUBJECT_END(s, x)
50778 + FOR_EACH_NESTED_SUBJECT_START(r, s)
50779 + if (s->obj_hash == NULL)
50780 + break;
50781 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50782 + kfree(s->obj_hash);
50783 + else
50784 + vfree(s->obj_hash);
50785 + FOR_EACH_NESTED_SUBJECT_END(s)
50786 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
50787 + kfree(r->subj_hash);
50788 + else
50789 + vfree(r->subj_hash);
50790 + r->subj_hash = NULL;
50791 +next_role:
50792 + FOR_EACH_ROLE_END(r)
50793 +
50794 + acl_free_all();
50795 +
50796 + if (acl_role_set.r_hash) {
50797 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
50798 + PAGE_SIZE)
50799 + kfree(acl_role_set.r_hash);
50800 + else
50801 + vfree(acl_role_set.r_hash);
50802 + }
50803 + if (name_set.n_hash) {
50804 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
50805 + PAGE_SIZE)
50806 + kfree(name_set.n_hash);
50807 + else
50808 + vfree(name_set.n_hash);
50809 + }
50810 +
50811 + if (inodev_set.i_hash) {
50812 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
50813 + PAGE_SIZE)
50814 + kfree(inodev_set.i_hash);
50815 + else
50816 + vfree(inodev_set.i_hash);
50817 + }
50818 +
50819 + gr_free_uidset();
50820 +
50821 + memset(&name_set, 0, sizeof (struct name_db));
50822 + memset(&inodev_set, 0, sizeof (struct inodev_db));
50823 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
50824 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
50825 +
50826 + default_role = NULL;
50827 + role_list = NULL;
50828 +
50829 + return;
50830 +}
50831 +
50832 +static __u32
50833 +count_user_objs(struct acl_object_label *userp)
50834 +{
50835 + struct acl_object_label o_tmp;
50836 + __u32 num = 0;
50837 +
50838 + while (userp) {
50839 + if (copy_from_user(&o_tmp, userp,
50840 + sizeof (struct acl_object_label)))
50841 + break;
50842 +
50843 + userp = o_tmp.prev;
50844 + num++;
50845 + }
50846 +
50847 + return num;
50848 +}
50849 +
50850 +static struct acl_subject_label *
50851 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
50852 +
50853 +static int
50854 +copy_user_glob(struct acl_object_label *obj)
50855 +{
50856 + struct acl_object_label *g_tmp, **guser;
50857 + unsigned int len;
50858 + char *tmp;
50859 +
50860 + if (obj->globbed == NULL)
50861 + return 0;
50862 +
50863 + guser = &obj->globbed;
50864 + while (*guser) {
50865 + g_tmp = (struct acl_object_label *)
50866 + acl_alloc(sizeof (struct acl_object_label));
50867 + if (g_tmp == NULL)
50868 + return -ENOMEM;
50869 +
50870 + if (copy_from_user(g_tmp, *guser,
50871 + sizeof (struct acl_object_label)))
50872 + return -EFAULT;
50873 +
50874 + len = strnlen_user(g_tmp->filename, PATH_MAX);
50875 +
50876 + if (!len || len >= PATH_MAX)
50877 + return -EINVAL;
50878 +
50879 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50880 + return -ENOMEM;
50881 +
50882 + if (copy_from_user(tmp, g_tmp->filename, len))
50883 + return -EFAULT;
50884 + tmp[len-1] = '\0';
50885 + g_tmp->filename = tmp;
50886 +
50887 + *guser = g_tmp;
50888 + guser = &(g_tmp->next);
50889 + }
50890 +
50891 + return 0;
50892 +}
50893 +
50894 +static int
50895 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
50896 + struct acl_role_label *role)
50897 +{
50898 + struct acl_object_label *o_tmp;
50899 + unsigned int len;
50900 + int ret;
50901 + char *tmp;
50902 +
50903 + while (userp) {
50904 + if ((o_tmp = (struct acl_object_label *)
50905 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
50906 + return -ENOMEM;
50907 +
50908 + if (copy_from_user(o_tmp, userp,
50909 + sizeof (struct acl_object_label)))
50910 + return -EFAULT;
50911 +
50912 + userp = o_tmp->prev;
50913 +
50914 + len = strnlen_user(o_tmp->filename, PATH_MAX);
50915 +
50916 + if (!len || len >= PATH_MAX)
50917 + return -EINVAL;
50918 +
50919 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50920 + return -ENOMEM;
50921 +
50922 + if (copy_from_user(tmp, o_tmp->filename, len))
50923 + return -EFAULT;
50924 + tmp[len-1] = '\0';
50925 + o_tmp->filename = tmp;
50926 +
50927 + insert_acl_obj_label(o_tmp, subj);
50928 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
50929 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
50930 + return -ENOMEM;
50931 +
50932 + ret = copy_user_glob(o_tmp);
50933 + if (ret)
50934 + return ret;
50935 +
50936 + if (o_tmp->nested) {
50937 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
50938 + if (IS_ERR(o_tmp->nested))
50939 + return PTR_ERR(o_tmp->nested);
50940 +
50941 + /* insert into nested subject list */
50942 + o_tmp->nested->next = role->hash->first;
50943 + role->hash->first = o_tmp->nested;
50944 + }
50945 + }
50946 +
50947 + return 0;
50948 +}
50949 +
50950 +static __u32
50951 +count_user_subjs(struct acl_subject_label *userp)
50952 +{
50953 + struct acl_subject_label s_tmp;
50954 + __u32 num = 0;
50955 +
50956 + while (userp) {
50957 + if (copy_from_user(&s_tmp, userp,
50958 + sizeof (struct acl_subject_label)))
50959 + break;
50960 +
50961 + userp = s_tmp.prev;
50962 + /* do not count nested subjects against this count, since
50963 + they are not included in the hash table, but are
50964 + attached to objects. We have already counted
50965 + the subjects in userspace for the allocation
50966 + stack
50967 + */
50968 + if (!(s_tmp.mode & GR_NESTED))
50969 + num++;
50970 + }
50971 +
50972 + return num;
50973 +}
50974 +
50975 +static int
50976 +copy_user_allowedips(struct acl_role_label *rolep)
50977 +{
50978 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
50979 +
50980 + ruserip = rolep->allowed_ips;
50981 +
50982 + while (ruserip) {
50983 + rlast = rtmp;
50984 +
50985 + if ((rtmp = (struct role_allowed_ip *)
50986 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
50987 + return -ENOMEM;
50988 +
50989 + if (copy_from_user(rtmp, ruserip,
50990 + sizeof (struct role_allowed_ip)))
50991 + return -EFAULT;
50992 +
50993 + ruserip = rtmp->prev;
50994 +
50995 + if (!rlast) {
50996 + rtmp->prev = NULL;
50997 + rolep->allowed_ips = rtmp;
50998 + } else {
50999 + rlast->next = rtmp;
51000 + rtmp->prev = rlast;
51001 + }
51002 +
51003 + if (!ruserip)
51004 + rtmp->next = NULL;
51005 + }
51006 +
51007 + return 0;
51008 +}
51009 +
51010 +static int
51011 +copy_user_transitions(struct acl_role_label *rolep)
51012 +{
51013 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51014 +
51015 + unsigned int len;
51016 + char *tmp;
51017 +
51018 + rusertp = rolep->transitions;
51019 +
51020 + while (rusertp) {
51021 + rlast = rtmp;
51022 +
51023 + if ((rtmp = (struct role_transition *)
51024 + acl_alloc(sizeof (struct role_transition))) == NULL)
51025 + return -ENOMEM;
51026 +
51027 + if (copy_from_user(rtmp, rusertp,
51028 + sizeof (struct role_transition)))
51029 + return -EFAULT;
51030 +
51031 + rusertp = rtmp->prev;
51032 +
51033 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51034 +
51035 + if (!len || len >= GR_SPROLE_LEN)
51036 + return -EINVAL;
51037 +
51038 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51039 + return -ENOMEM;
51040 +
51041 + if (copy_from_user(tmp, rtmp->rolename, len))
51042 + return -EFAULT;
51043 + tmp[len-1] = '\0';
51044 + rtmp->rolename = tmp;
51045 +
51046 + if (!rlast) {
51047 + rtmp->prev = NULL;
51048 + rolep->transitions = rtmp;
51049 + } else {
51050 + rlast->next = rtmp;
51051 + rtmp->prev = rlast;
51052 + }
51053 +
51054 + if (!rusertp)
51055 + rtmp->next = NULL;
51056 + }
51057 +
51058 + return 0;
51059 +}
51060 +
51061 +static struct acl_subject_label *
51062 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51063 +{
51064 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51065 + unsigned int len;
51066 + char *tmp;
51067 + __u32 num_objs;
51068 + struct acl_ip_label **i_tmp, *i_utmp2;
51069 + struct gr_hash_struct ghash;
51070 + struct subject_map *subjmap;
51071 + unsigned int i_num;
51072 + int err;
51073 +
51074 + s_tmp = lookup_subject_map(userp);
51075 +
51076 + /* we've already copied this subject into the kernel, just return
51077 + the reference to it, and don't copy it over again
51078 + */
51079 + if (s_tmp)
51080 + return(s_tmp);
51081 +
51082 + if ((s_tmp = (struct acl_subject_label *)
51083 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51084 + return ERR_PTR(-ENOMEM);
51085 +
51086 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51087 + if (subjmap == NULL)
51088 + return ERR_PTR(-ENOMEM);
51089 +
51090 + subjmap->user = userp;
51091 + subjmap->kernel = s_tmp;
51092 + insert_subj_map_entry(subjmap);
51093 +
51094 + if (copy_from_user(s_tmp, userp,
51095 + sizeof (struct acl_subject_label)))
51096 + return ERR_PTR(-EFAULT);
51097 +
51098 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51099 +
51100 + if (!len || len >= PATH_MAX)
51101 + return ERR_PTR(-EINVAL);
51102 +
51103 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51104 + return ERR_PTR(-ENOMEM);
51105 +
51106 + if (copy_from_user(tmp, s_tmp->filename, len))
51107 + return ERR_PTR(-EFAULT);
51108 + tmp[len-1] = '\0';
51109 + s_tmp->filename = tmp;
51110 +
51111 + if (!strcmp(s_tmp->filename, "/"))
51112 + role->root_label = s_tmp;
51113 +
51114 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51115 + return ERR_PTR(-EFAULT);
51116 +
51117 + /* copy user and group transition tables */
51118 +
51119 + if (s_tmp->user_trans_num) {
51120 + uid_t *uidlist;
51121 +
51122 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51123 + if (uidlist == NULL)
51124 + return ERR_PTR(-ENOMEM);
51125 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51126 + return ERR_PTR(-EFAULT);
51127 +
51128 + s_tmp->user_transitions = uidlist;
51129 + }
51130 +
51131 + if (s_tmp->group_trans_num) {
51132 + gid_t *gidlist;
51133 +
51134 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51135 + if (gidlist == NULL)
51136 + return ERR_PTR(-ENOMEM);
51137 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51138 + return ERR_PTR(-EFAULT);
51139 +
51140 + s_tmp->group_transitions = gidlist;
51141 + }
51142 +
51143 + /* set up object hash table */
51144 + num_objs = count_user_objs(ghash.first);
51145 +
51146 + s_tmp->obj_hash_size = num_objs;
51147 + s_tmp->obj_hash =
51148 + (struct acl_object_label **)
51149 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51150 +
51151 + if (!s_tmp->obj_hash)
51152 + return ERR_PTR(-ENOMEM);
51153 +
51154 + memset(s_tmp->obj_hash, 0,
51155 + s_tmp->obj_hash_size *
51156 + sizeof (struct acl_object_label *));
51157 +
51158 + /* add in objects */
51159 + err = copy_user_objs(ghash.first, s_tmp, role);
51160 +
51161 + if (err)
51162 + return ERR_PTR(err);
51163 +
51164 + /* set pointer for parent subject */
51165 + if (s_tmp->parent_subject) {
51166 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51167 +
51168 + if (IS_ERR(s_tmp2))
51169 + return s_tmp2;
51170 +
51171 + s_tmp->parent_subject = s_tmp2;
51172 + }
51173 +
51174 + /* add in ip acls */
51175 +
51176 + if (!s_tmp->ip_num) {
51177 + s_tmp->ips = NULL;
51178 + goto insert;
51179 + }
51180 +
51181 + i_tmp =
51182 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51183 + sizeof (struct acl_ip_label *));
51184 +
51185 + if (!i_tmp)
51186 + return ERR_PTR(-ENOMEM);
51187 +
51188 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51189 + *(i_tmp + i_num) =
51190 + (struct acl_ip_label *)
51191 + acl_alloc(sizeof (struct acl_ip_label));
51192 + if (!*(i_tmp + i_num))
51193 + return ERR_PTR(-ENOMEM);
51194 +
51195 + if (copy_from_user
51196 + (&i_utmp2, s_tmp->ips + i_num,
51197 + sizeof (struct acl_ip_label *)))
51198 + return ERR_PTR(-EFAULT);
51199 +
51200 + if (copy_from_user
51201 + (*(i_tmp + i_num), i_utmp2,
51202 + sizeof (struct acl_ip_label)))
51203 + return ERR_PTR(-EFAULT);
51204 +
51205 + if ((*(i_tmp + i_num))->iface == NULL)
51206 + continue;
51207 +
51208 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51209 + if (!len || len >= IFNAMSIZ)
51210 + return ERR_PTR(-EINVAL);
51211 + tmp = acl_alloc(len);
51212 + if (tmp == NULL)
51213 + return ERR_PTR(-ENOMEM);
51214 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51215 + return ERR_PTR(-EFAULT);
51216 + (*(i_tmp + i_num))->iface = tmp;
51217 + }
51218 +
51219 + s_tmp->ips = i_tmp;
51220 +
51221 +insert:
51222 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51223 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51224 + return ERR_PTR(-ENOMEM);
51225 +
51226 + return s_tmp;
51227 +}
51228 +
51229 +static int
51230 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51231 +{
51232 + struct acl_subject_label s_pre;
51233 + struct acl_subject_label * ret;
51234 + int err;
51235 +
51236 + while (userp) {
51237 + if (copy_from_user(&s_pre, userp,
51238 + sizeof (struct acl_subject_label)))
51239 + return -EFAULT;
51240 +
51241 + /* do not add nested subjects here, add
51242 + while parsing objects
51243 + */
51244 +
51245 + if (s_pre.mode & GR_NESTED) {
51246 + userp = s_pre.prev;
51247 + continue;
51248 + }
51249 +
51250 + ret = do_copy_user_subj(userp, role);
51251 +
51252 + err = PTR_ERR(ret);
51253 + if (IS_ERR(ret))
51254 + return err;
51255 +
51256 + insert_acl_subj_label(ret, role);
51257 +
51258 + userp = s_pre.prev;
51259 + }
51260 +
51261 + return 0;
51262 +}
51263 +
51264 +static int
51265 +copy_user_acl(struct gr_arg *arg)
51266 +{
51267 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51268 + struct sprole_pw *sptmp;
51269 + struct gr_hash_struct *ghash;
51270 + uid_t *domainlist;
51271 + unsigned int r_num;
51272 + unsigned int len;
51273 + char *tmp;
51274 + int err = 0;
51275 + __u16 i;
51276 + __u32 num_subjs;
51277 +
51278 + /* we need a default and kernel role */
51279 + if (arg->role_db.num_roles < 2)
51280 + return -EINVAL;
51281 +
51282 + /* copy special role authentication info from userspace */
51283 +
51284 + num_sprole_pws = arg->num_sprole_pws;
51285 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51286 +
51287 + if (!acl_special_roles) {
51288 + err = -ENOMEM;
51289 + goto cleanup;
51290 + }
51291 +
51292 + for (i = 0; i < num_sprole_pws; i++) {
51293 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51294 + if (!sptmp) {
51295 + err = -ENOMEM;
51296 + goto cleanup;
51297 + }
51298 + if (copy_from_user(sptmp, arg->sprole_pws + i,
51299 + sizeof (struct sprole_pw))) {
51300 + err = -EFAULT;
51301 + goto cleanup;
51302 + }
51303 +
51304 + len =
51305 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51306 +
51307 + if (!len || len >= GR_SPROLE_LEN) {
51308 + err = -EINVAL;
51309 + goto cleanup;
51310 + }
51311 +
51312 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
51313 + err = -ENOMEM;
51314 + goto cleanup;
51315 + }
51316 +
51317 + if (copy_from_user(tmp, sptmp->rolename, len)) {
51318 + err = -EFAULT;
51319 + goto cleanup;
51320 + }
51321 + tmp[len-1] = '\0';
51322 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51323 + printk(KERN_ALERT "Copying special role %s\n", tmp);
51324 +#endif
51325 + sptmp->rolename = tmp;
51326 + acl_special_roles[i] = sptmp;
51327 + }
51328 +
51329 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51330 +
51331 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51332 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
51333 +
51334 + if (!r_tmp) {
51335 + err = -ENOMEM;
51336 + goto cleanup;
51337 + }
51338 +
51339 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
51340 + sizeof (struct acl_role_label *))) {
51341 + err = -EFAULT;
51342 + goto cleanup;
51343 + }
51344 +
51345 + if (copy_from_user(r_tmp, r_utmp2,
51346 + sizeof (struct acl_role_label))) {
51347 + err = -EFAULT;
51348 + goto cleanup;
51349 + }
51350 +
51351 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51352 +
51353 + if (!len || len >= PATH_MAX) {
51354 + err = -EINVAL;
51355 + goto cleanup;
51356 + }
51357 +
51358 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
51359 + err = -ENOMEM;
51360 + goto cleanup;
51361 + }
51362 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
51363 + err = -EFAULT;
51364 + goto cleanup;
51365 + }
51366 + tmp[len-1] = '\0';
51367 + r_tmp->rolename = tmp;
51368 +
51369 + if (!strcmp(r_tmp->rolename, "default")
51370 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51371 + default_role = r_tmp;
51372 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51373 + kernel_role = r_tmp;
51374 + }
51375 +
51376 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
51377 + err = -ENOMEM;
51378 + goto cleanup;
51379 + }
51380 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
51381 + err = -EFAULT;
51382 + goto cleanup;
51383 + }
51384 +
51385 + r_tmp->hash = ghash;
51386 +
51387 + num_subjs = count_user_subjs(r_tmp->hash->first);
51388 +
51389 + r_tmp->subj_hash_size = num_subjs;
51390 + r_tmp->subj_hash =
51391 + (struct acl_subject_label **)
51392 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51393 +
51394 + if (!r_tmp->subj_hash) {
51395 + err = -ENOMEM;
51396 + goto cleanup;
51397 + }
51398 +
51399 + err = copy_user_allowedips(r_tmp);
51400 + if (err)
51401 + goto cleanup;
51402 +
51403 + /* copy domain info */
51404 + if (r_tmp->domain_children != NULL) {
51405 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51406 + if (domainlist == NULL) {
51407 + err = -ENOMEM;
51408 + goto cleanup;
51409 + }
51410 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
51411 + err = -EFAULT;
51412 + goto cleanup;
51413 + }
51414 + r_tmp->domain_children = domainlist;
51415 + }
51416 +
51417 + err = copy_user_transitions(r_tmp);
51418 + if (err)
51419 + goto cleanup;
51420 +
51421 + memset(r_tmp->subj_hash, 0,
51422 + r_tmp->subj_hash_size *
51423 + sizeof (struct acl_subject_label *));
51424 +
51425 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51426 +
51427 + if (err)
51428 + goto cleanup;
51429 +
51430 + /* set nested subject list to null */
51431 + r_tmp->hash->first = NULL;
51432 +
51433 + insert_acl_role_label(r_tmp);
51434 + }
51435 +
51436 + goto return_err;
51437 + cleanup:
51438 + free_variables();
51439 + return_err:
51440 + return err;
51441 +
51442 +}
51443 +
51444 +static int
51445 +gracl_init(struct gr_arg *args)
51446 +{
51447 + int error = 0;
51448 +
51449 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51450 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51451 +
51452 + if (init_variables(args)) {
51453 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51454 + error = -ENOMEM;
51455 + free_variables();
51456 + goto out;
51457 + }
51458 +
51459 + error = copy_user_acl(args);
51460 + free_init_variables();
51461 + if (error) {
51462 + free_variables();
51463 + goto out;
51464 + }
51465 +
51466 + if ((error = gr_set_acls(0))) {
51467 + free_variables();
51468 + goto out;
51469 + }
51470 +
51471 + pax_open_kernel();
51472 + gr_status |= GR_READY;
51473 + pax_close_kernel();
51474 +
51475 + out:
51476 + return error;
51477 +}
51478 +
51479 +/* derived from glibc fnmatch() 0: match, 1: no match*/
51480 +
51481 +static int
51482 +glob_match(const char *p, const char *n)
51483 +{
51484 + char c;
51485 +
51486 + while ((c = *p++) != '\0') {
51487 + switch (c) {
51488 + case '?':
51489 + if (*n == '\0')
51490 + return 1;
51491 + else if (*n == '/')
51492 + return 1;
51493 + break;
51494 + case '\\':
51495 + if (*n != c)
51496 + return 1;
51497 + break;
51498 + case '*':
51499 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
51500 + if (*n == '/')
51501 + return 1;
51502 + else if (c == '?') {
51503 + if (*n == '\0')
51504 + return 1;
51505 + else
51506 + ++n;
51507 + }
51508 + }
51509 + if (c == '\0') {
51510 + return 0;
51511 + } else {
51512 + const char *endp;
51513 +
51514 + if ((endp = strchr(n, '/')) == NULL)
51515 + endp = n + strlen(n);
51516 +
51517 + if (c == '[') {
51518 + for (--p; n < endp; ++n)
51519 + if (!glob_match(p, n))
51520 + return 0;
51521 + } else if (c == '/') {
51522 + while (*n != '\0' && *n != '/')
51523 + ++n;
51524 + if (*n == '/' && !glob_match(p, n + 1))
51525 + return 0;
51526 + } else {
51527 + for (--p; n < endp; ++n)
51528 + if (*n == c && !glob_match(p, n))
51529 + return 0;
51530 + }
51531 +
51532 + return 1;
51533 + }
51534 + case '[':
51535 + {
51536 + int not;
51537 + char cold;
51538 +
51539 + if (*n == '\0' || *n == '/')
51540 + return 1;
51541 +
51542 + not = (*p == '!' || *p == '^');
51543 + if (not)
51544 + ++p;
51545 +
51546 + c = *p++;
51547 + for (;;) {
51548 + unsigned char fn = (unsigned char)*n;
51549 +
51550 + if (c == '\0')
51551 + return 1;
51552 + else {
51553 + if (c == fn)
51554 + goto matched;
51555 + cold = c;
51556 + c = *p++;
51557 +
51558 + if (c == '-' && *p != ']') {
51559 + unsigned char cend = *p++;
51560 +
51561 + if (cend == '\0')
51562 + return 1;
51563 +
51564 + if (cold <= fn && fn <= cend)
51565 + goto matched;
51566 +
51567 + c = *p++;
51568 + }
51569 + }
51570 +
51571 + if (c == ']')
51572 + break;
51573 + }
51574 + if (!not)
51575 + return 1;
51576 + break;
51577 + matched:
51578 + while (c != ']') {
51579 + if (c == '\0')
51580 + return 1;
51581 +
51582 + c = *p++;
51583 + }
51584 + if (not)
51585 + return 1;
51586 + }
51587 + break;
51588 + default:
51589 + if (c != *n)
51590 + return 1;
51591 + }
51592 +
51593 + ++n;
51594 + }
51595 +
51596 + if (*n == '\0')
51597 + return 0;
51598 +
51599 + if (*n == '/')
51600 + return 0;
51601 +
51602 + return 1;
51603 +}
51604 +
51605 +static struct acl_object_label *
51606 +chk_glob_label(struct acl_object_label *globbed,
51607 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51608 +{
51609 + struct acl_object_label *tmp;
51610 +
51611 + if (*path == NULL)
51612 + *path = gr_to_filename_nolock(dentry, mnt);
51613 +
51614 + tmp = globbed;
51615 +
51616 + while (tmp) {
51617 + if (!glob_match(tmp->filename, *path))
51618 + return tmp;
51619 + tmp = tmp->next;
51620 + }
51621 +
51622 + return NULL;
51623 +}
51624 +
51625 +static struct acl_object_label *
51626 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51627 + const ino_t curr_ino, const dev_t curr_dev,
51628 + const struct acl_subject_label *subj, char **path, const int checkglob)
51629 +{
51630 + struct acl_subject_label *tmpsubj;
51631 + struct acl_object_label *retval;
51632 + struct acl_object_label *retval2;
51633 +
51634 + tmpsubj = (struct acl_subject_label *) subj;
51635 + read_lock(&gr_inode_lock);
51636 + do {
51637 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51638 + if (retval) {
51639 + if (checkglob && retval->globbed) {
51640 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51641 + if (retval2)
51642 + retval = retval2;
51643 + }
51644 + break;
51645 + }
51646 + } while ((tmpsubj = tmpsubj->parent_subject));
51647 + read_unlock(&gr_inode_lock);
51648 +
51649 + return retval;
51650 +}
51651 +
51652 +static __inline__ struct acl_object_label *
51653 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51654 + struct dentry *curr_dentry,
51655 + const struct acl_subject_label *subj, char **path, const int checkglob)
51656 +{
51657 + int newglob = checkglob;
51658 + ino_t inode;
51659 + dev_t device;
51660 +
51661 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
51662 + as we don't want a / * rule to match instead of the / object
51663 + don't do this for create lookups that call this function though, since they're looking up
51664 + on the parent and thus need globbing checks on all paths
51665 + */
51666 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51667 + newglob = GR_NO_GLOB;
51668 +
51669 + spin_lock(&curr_dentry->d_lock);
51670 + inode = curr_dentry->d_inode->i_ino;
51671 + device = __get_dev(curr_dentry);
51672 + spin_unlock(&curr_dentry->d_lock);
51673 +
51674 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51675 +}
51676 +
51677 +static struct acl_object_label *
51678 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51679 + const struct acl_subject_label *subj, char *path, const int checkglob)
51680 +{
51681 + struct dentry *dentry = (struct dentry *) l_dentry;
51682 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51683 + struct acl_object_label *retval;
51684 + struct dentry *parent;
51685 +
51686 + write_seqlock(&rename_lock);
51687 + br_read_lock(vfsmount_lock);
51688 +
51689 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
51690 +#ifdef CONFIG_NET
51691 + mnt == sock_mnt ||
51692 +#endif
51693 +#ifdef CONFIG_HUGETLBFS
51694 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
51695 +#endif
51696 + /* ignore Eric Biederman */
51697 + IS_PRIVATE(l_dentry->d_inode))) {
51698 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
51699 + goto out;
51700 + }
51701 +
51702 + for (;;) {
51703 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51704 + break;
51705 +
51706 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51707 + if (mnt->mnt_parent == mnt)
51708 + break;
51709 +
51710 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51711 + if (retval != NULL)
51712 + goto out;
51713 +
51714 + dentry = mnt->mnt_mountpoint;
51715 + mnt = mnt->mnt_parent;
51716 + continue;
51717 + }
51718 +
51719 + parent = dentry->d_parent;
51720 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51721 + if (retval != NULL)
51722 + goto out;
51723 +
51724 + dentry = parent;
51725 + }
51726 +
51727 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51728 +
51729 + /* real_root is pinned so we don't have to hold a reference */
51730 + if (retval == NULL)
51731 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
51732 +out:
51733 + br_read_unlock(vfsmount_lock);
51734 + write_sequnlock(&rename_lock);
51735 +
51736 + BUG_ON(retval == NULL);
51737 +
51738 + return retval;
51739 +}
51740 +
51741 +static __inline__ struct acl_object_label *
51742 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51743 + const struct acl_subject_label *subj)
51744 +{
51745 + char *path = NULL;
51746 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
51747 +}
51748 +
51749 +static __inline__ struct acl_object_label *
51750 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51751 + const struct acl_subject_label *subj)
51752 +{
51753 + char *path = NULL;
51754 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
51755 +}
51756 +
51757 +static __inline__ struct acl_object_label *
51758 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51759 + const struct acl_subject_label *subj, char *path)
51760 +{
51761 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
51762 +}
51763 +
51764 +static struct acl_subject_label *
51765 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51766 + const struct acl_role_label *role)
51767 +{
51768 + struct dentry *dentry = (struct dentry *) l_dentry;
51769 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51770 + struct acl_subject_label *retval;
51771 + struct dentry *parent;
51772 +
51773 + write_seqlock(&rename_lock);
51774 + br_read_lock(vfsmount_lock);
51775 +
51776 + for (;;) {
51777 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51778 + break;
51779 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51780 + if (mnt->mnt_parent == mnt)
51781 + break;
51782 +
51783 + spin_lock(&dentry->d_lock);
51784 + read_lock(&gr_inode_lock);
51785 + retval =
51786 + lookup_acl_subj_label(dentry->d_inode->i_ino,
51787 + __get_dev(dentry), role);
51788 + read_unlock(&gr_inode_lock);
51789 + spin_unlock(&dentry->d_lock);
51790 + if (retval != NULL)
51791 + goto out;
51792 +
51793 + dentry = mnt->mnt_mountpoint;
51794 + mnt = mnt->mnt_parent;
51795 + continue;
51796 + }
51797 +
51798 + spin_lock(&dentry->d_lock);
51799 + read_lock(&gr_inode_lock);
51800 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51801 + __get_dev(dentry), role);
51802 + read_unlock(&gr_inode_lock);
51803 + parent = dentry->d_parent;
51804 + spin_unlock(&dentry->d_lock);
51805 +
51806 + if (retval != NULL)
51807 + goto out;
51808 +
51809 + dentry = parent;
51810 + }
51811 +
51812 + spin_lock(&dentry->d_lock);
51813 + read_lock(&gr_inode_lock);
51814 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51815 + __get_dev(dentry), role);
51816 + read_unlock(&gr_inode_lock);
51817 + spin_unlock(&dentry->d_lock);
51818 +
51819 + if (unlikely(retval == NULL)) {
51820 + /* real_root is pinned, we don't need to hold a reference */
51821 + read_lock(&gr_inode_lock);
51822 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
51823 + __get_dev(real_root.dentry), role);
51824 + read_unlock(&gr_inode_lock);
51825 + }
51826 +out:
51827 + br_read_unlock(vfsmount_lock);
51828 + write_sequnlock(&rename_lock);
51829 +
51830 + BUG_ON(retval == NULL);
51831 +
51832 + return retval;
51833 +}
51834 +
51835 +static void
51836 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
51837 +{
51838 + struct task_struct *task = current;
51839 + const struct cred *cred = current_cred();
51840 +
51841 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51842 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51843 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51844 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
51845 +
51846 + return;
51847 +}
51848 +
51849 +static void
51850 +gr_log_learn_sysctl(const char *path, const __u32 mode)
51851 +{
51852 + struct task_struct *task = current;
51853 + const struct cred *cred = current_cred();
51854 +
51855 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51856 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51857 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51858 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
51859 +
51860 + return;
51861 +}
51862 +
51863 +static void
51864 +gr_log_learn_id_change(const char type, const unsigned int real,
51865 + const unsigned int effective, const unsigned int fs)
51866 +{
51867 + struct task_struct *task = current;
51868 + const struct cred *cred = current_cred();
51869 +
51870 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
51871 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51872 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51873 + type, real, effective, fs, &task->signal->saved_ip);
51874 +
51875 + return;
51876 +}
51877 +
51878 +__u32
51879 +gr_search_file(const struct dentry * dentry, const __u32 mode,
51880 + const struct vfsmount * mnt)
51881 +{
51882 + __u32 retval = mode;
51883 + struct acl_subject_label *curracl;
51884 + struct acl_object_label *currobj;
51885 +
51886 + if (unlikely(!(gr_status & GR_READY)))
51887 + return (mode & ~GR_AUDITS);
51888 +
51889 + curracl = current->acl;
51890 +
51891 + currobj = chk_obj_label(dentry, mnt, curracl);
51892 + retval = currobj->mode & mode;
51893 +
51894 + /* if we're opening a specified transfer file for writing
51895 + (e.g. /dev/initctl), then transfer our role to init
51896 + */
51897 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
51898 + current->role->roletype & GR_ROLE_PERSIST)) {
51899 + struct task_struct *task = init_pid_ns.child_reaper;
51900 +
51901 + if (task->role != current->role) {
51902 + task->acl_sp_role = 0;
51903 + task->acl_role_id = current->acl_role_id;
51904 + task->role = current->role;
51905 + rcu_read_lock();
51906 + read_lock(&grsec_exec_file_lock);
51907 + gr_apply_subject_to_task(task);
51908 + read_unlock(&grsec_exec_file_lock);
51909 + rcu_read_unlock();
51910 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
51911 + }
51912 + }
51913 +
51914 + if (unlikely
51915 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
51916 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
51917 + __u32 new_mode = mode;
51918 +
51919 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51920 +
51921 + retval = new_mode;
51922 +
51923 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
51924 + new_mode |= GR_INHERIT;
51925 +
51926 + if (!(mode & GR_NOLEARN))
51927 + gr_log_learn(dentry, mnt, new_mode);
51928 + }
51929 +
51930 + return retval;
51931 +}
51932 +
51933 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
51934 + const struct dentry *parent,
51935 + const struct vfsmount *mnt)
51936 +{
51937 + struct name_entry *match;
51938 + struct acl_object_label *matchpo;
51939 + struct acl_subject_label *curracl;
51940 + char *path;
51941 +
51942 + if (unlikely(!(gr_status & GR_READY)))
51943 + return NULL;
51944 +
51945 + preempt_disable();
51946 + path = gr_to_filename_rbac(new_dentry, mnt);
51947 + match = lookup_name_entry_create(path);
51948 +
51949 + curracl = current->acl;
51950 +
51951 + if (match) {
51952 + read_lock(&gr_inode_lock);
51953 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
51954 + read_unlock(&gr_inode_lock);
51955 +
51956 + if (matchpo) {
51957 + preempt_enable();
51958 + return matchpo;
51959 + }
51960 + }
51961 +
51962 + // lookup parent
51963 +
51964 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
51965 +
51966 + preempt_enable();
51967 + return matchpo;
51968 +}
51969 +
51970 +__u32
51971 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
51972 + const struct vfsmount * mnt, const __u32 mode)
51973 +{
51974 + struct acl_object_label *matchpo;
51975 + __u32 retval;
51976 +
51977 + if (unlikely(!(gr_status & GR_READY)))
51978 + return (mode & ~GR_AUDITS);
51979 +
51980 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
51981 +
51982 + retval = matchpo->mode & mode;
51983 +
51984 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
51985 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
51986 + __u32 new_mode = mode;
51987 +
51988 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51989 +
51990 + gr_log_learn(new_dentry, mnt, new_mode);
51991 + return new_mode;
51992 + }
51993 +
51994 + return retval;
51995 +}
51996 +
51997 +__u32
51998 +gr_check_link(const struct dentry * new_dentry,
51999 + const struct dentry * parent_dentry,
52000 + const struct vfsmount * parent_mnt,
52001 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52002 +{
52003 + struct acl_object_label *obj;
52004 + __u32 oldmode, newmode;
52005 + __u32 needmode;
52006 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52007 + GR_DELETE | GR_INHERIT;
52008 +
52009 + if (unlikely(!(gr_status & GR_READY)))
52010 + return (GR_CREATE | GR_LINK);
52011 +
52012 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52013 + oldmode = obj->mode;
52014 +
52015 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52016 + newmode = obj->mode;
52017 +
52018 + needmode = newmode & checkmodes;
52019 +
52020 + // old name for hardlink must have at least the permissions of the new name
52021 + if ((oldmode & needmode) != needmode)
52022 + goto bad;
52023 +
52024 + // if old name had restrictions/auditing, make sure the new name does as well
52025 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52026 +
52027 + // don't allow hardlinking of suid/sgid files without permission
52028 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52029 + needmode |= GR_SETID;
52030 +
52031 + if ((newmode & needmode) != needmode)
52032 + goto bad;
52033 +
52034 + // enforce minimum permissions
52035 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52036 + return newmode;
52037 +bad:
52038 + needmode = oldmode;
52039 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52040 + needmode |= GR_SETID;
52041 +
52042 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52043 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52044 + return (GR_CREATE | GR_LINK);
52045 + } else if (newmode & GR_SUPPRESS)
52046 + return GR_SUPPRESS;
52047 + else
52048 + return 0;
52049 +}
52050 +
52051 +int
52052 +gr_check_hidden_task(const struct task_struct *task)
52053 +{
52054 + if (unlikely(!(gr_status & GR_READY)))
52055 + return 0;
52056 +
52057 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52058 + return 1;
52059 +
52060 + return 0;
52061 +}
52062 +
52063 +int
52064 +gr_check_protected_task(const struct task_struct *task)
52065 +{
52066 + if (unlikely(!(gr_status & GR_READY) || !task))
52067 + return 0;
52068 +
52069 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52070 + task->acl != current->acl)
52071 + return 1;
52072 +
52073 + return 0;
52074 +}
52075 +
52076 +int
52077 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52078 +{
52079 + struct task_struct *p;
52080 + int ret = 0;
52081 +
52082 + if (unlikely(!(gr_status & GR_READY) || !pid))
52083 + return ret;
52084 +
52085 + read_lock(&tasklist_lock);
52086 + do_each_pid_task(pid, type, p) {
52087 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52088 + p->acl != current->acl) {
52089 + ret = 1;
52090 + goto out;
52091 + }
52092 + } while_each_pid_task(pid, type, p);
52093 +out:
52094 + read_unlock(&tasklist_lock);
52095 +
52096 + return ret;
52097 +}
52098 +
52099 +void
52100 +gr_copy_label(struct task_struct *tsk)
52101 +{
52102 + /* plain copying of fields is already done by dup_task_struct */
52103 + tsk->signal->used_accept = 0;
52104 + tsk->acl_sp_role = 0;
52105 + //tsk->acl_role_id = current->acl_role_id;
52106 + //tsk->acl = current->acl;
52107 + //tsk->role = current->role;
52108 + tsk->signal->curr_ip = current->signal->curr_ip;
52109 + tsk->signal->saved_ip = current->signal->saved_ip;
52110 + if (current->exec_file)
52111 + get_file(current->exec_file);
52112 + //tsk->exec_file = current->exec_file;
52113 + //tsk->is_writable = current->is_writable;
52114 + if (unlikely(current->signal->used_accept)) {
52115 + current->signal->curr_ip = 0;
52116 + current->signal->saved_ip = 0;
52117 + }
52118 +
52119 + return;
52120 +}
52121 +
52122 +static void
52123 +gr_set_proc_res(struct task_struct *task)
52124 +{
52125 + struct acl_subject_label *proc;
52126 + unsigned short i;
52127 +
52128 + proc = task->acl;
52129 +
52130 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52131 + return;
52132 +
52133 + for (i = 0; i < RLIM_NLIMITS; i++) {
52134 + if (!(proc->resmask & (1 << i)))
52135 + continue;
52136 +
52137 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52138 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52139 + }
52140 +
52141 + return;
52142 +}
52143 +
52144 +extern int __gr_process_user_ban(struct user_struct *user);
52145 +
52146 +int
52147 +gr_check_user_change(int real, int effective, int fs)
52148 +{
52149 + unsigned int i;
52150 + __u16 num;
52151 + uid_t *uidlist;
52152 + int curuid;
52153 + int realok = 0;
52154 + int effectiveok = 0;
52155 + int fsok = 0;
52156 +
52157 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52158 + struct user_struct *user;
52159 +
52160 + if (real == -1)
52161 + goto skipit;
52162 +
52163 + user = find_user(real);
52164 + if (user == NULL)
52165 + goto skipit;
52166 +
52167 + if (__gr_process_user_ban(user)) {
52168 + /* for find_user */
52169 + free_uid(user);
52170 + return 1;
52171 + }
52172 +
52173 + /* for find_user */
52174 + free_uid(user);
52175 +
52176 +skipit:
52177 +#endif
52178 +
52179 + if (unlikely(!(gr_status & GR_READY)))
52180 + return 0;
52181 +
52182 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52183 + gr_log_learn_id_change('u', real, effective, fs);
52184 +
52185 + num = current->acl->user_trans_num;
52186 + uidlist = current->acl->user_transitions;
52187 +
52188 + if (uidlist == NULL)
52189 + return 0;
52190 +
52191 + if (real == -1)
52192 + realok = 1;
52193 + if (effective == -1)
52194 + effectiveok = 1;
52195 + if (fs == -1)
52196 + fsok = 1;
52197 +
52198 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
52199 + for (i = 0; i < num; i++) {
52200 + curuid = (int)uidlist[i];
52201 + if (real == curuid)
52202 + realok = 1;
52203 + if (effective == curuid)
52204 + effectiveok = 1;
52205 + if (fs == curuid)
52206 + fsok = 1;
52207 + }
52208 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
52209 + for (i = 0; i < num; i++) {
52210 + curuid = (int)uidlist[i];
52211 + if (real == curuid)
52212 + break;
52213 + if (effective == curuid)
52214 + break;
52215 + if (fs == curuid)
52216 + break;
52217 + }
52218 + /* not in deny list */
52219 + if (i == num) {
52220 + realok = 1;
52221 + effectiveok = 1;
52222 + fsok = 1;
52223 + }
52224 + }
52225 +
52226 + if (realok && effectiveok && fsok)
52227 + return 0;
52228 + else {
52229 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52230 + return 1;
52231 + }
52232 +}
52233 +
52234 +int
52235 +gr_check_group_change(int real, int effective, int fs)
52236 +{
52237 + unsigned int i;
52238 + __u16 num;
52239 + gid_t *gidlist;
52240 + int curgid;
52241 + int realok = 0;
52242 + int effectiveok = 0;
52243 + int fsok = 0;
52244 +
52245 + if (unlikely(!(gr_status & GR_READY)))
52246 + return 0;
52247 +
52248 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52249 + gr_log_learn_id_change('g', real, effective, fs);
52250 +
52251 + num = current->acl->group_trans_num;
52252 + gidlist = current->acl->group_transitions;
52253 +
52254 + if (gidlist == NULL)
52255 + return 0;
52256 +
52257 + if (real == -1)
52258 + realok = 1;
52259 + if (effective == -1)
52260 + effectiveok = 1;
52261 + if (fs == -1)
52262 + fsok = 1;
52263 +
52264 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52265 + for (i = 0; i < num; i++) {
52266 + curgid = (int)gidlist[i];
52267 + if (real == curgid)
52268 + realok = 1;
52269 + if (effective == curgid)
52270 + effectiveok = 1;
52271 + if (fs == curgid)
52272 + fsok = 1;
52273 + }
52274 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52275 + for (i = 0; i < num; i++) {
52276 + curgid = (int)gidlist[i];
52277 + if (real == curgid)
52278 + break;
52279 + if (effective == curgid)
52280 + break;
52281 + if (fs == curgid)
52282 + break;
52283 + }
52284 + /* not in deny list */
52285 + if (i == num) {
52286 + realok = 1;
52287 + effectiveok = 1;
52288 + fsok = 1;
52289 + }
52290 + }
52291 +
52292 + if (realok && effectiveok && fsok)
52293 + return 0;
52294 + else {
52295 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52296 + return 1;
52297 + }
52298 +}
52299 +
52300 +extern int gr_acl_is_capable(const int cap);
52301 +
52302 +void
52303 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52304 +{
52305 + struct acl_role_label *role = task->role;
52306 + struct acl_subject_label *subj = NULL;
52307 + struct acl_object_label *obj;
52308 + struct file *filp;
52309 +
52310 + if (unlikely(!(gr_status & GR_READY)))
52311 + return;
52312 +
52313 + filp = task->exec_file;
52314 +
52315 + /* kernel process, we'll give them the kernel role */
52316 + if (unlikely(!filp)) {
52317 + task->role = kernel_role;
52318 + task->acl = kernel_role->root_label;
52319 + return;
52320 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52321 + role = lookup_acl_role_label(task, uid, gid);
52322 +
52323 + /* don't change the role if we're not a privileged process */
52324 + if (role && task->role != role &&
52325 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52326 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52327 + return;
52328 +
52329 + /* perform subject lookup in possibly new role
52330 + we can use this result below in the case where role == task->role
52331 + */
52332 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52333 +
52334 + /* if we changed uid/gid, but result in the same role
52335 + and are using inheritance, don't lose the inherited subject
52336 + if current subject is other than what normal lookup
52337 + would result in, we arrived via inheritance, don't
52338 + lose subject
52339 + */
52340 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52341 + (subj == task->acl)))
52342 + task->acl = subj;
52343 +
52344 + task->role = role;
52345 +
52346 + task->is_writable = 0;
52347 +
52348 + /* ignore additional mmap checks for processes that are writable
52349 + by the default ACL */
52350 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52351 + if (unlikely(obj->mode & GR_WRITE))
52352 + task->is_writable = 1;
52353 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52354 + if (unlikely(obj->mode & GR_WRITE))
52355 + task->is_writable = 1;
52356 +
52357 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52358 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52359 +#endif
52360 +
52361 + gr_set_proc_res(task);
52362 +
52363 + return;
52364 +}
52365 +
52366 +int
52367 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52368 + const int unsafe_flags)
52369 +{
52370 + struct task_struct *task = current;
52371 + struct acl_subject_label *newacl;
52372 + struct acl_object_label *obj;
52373 + __u32 retmode;
52374 +
52375 + if (unlikely(!(gr_status & GR_READY)))
52376 + return 0;
52377 +
52378 + newacl = chk_subj_label(dentry, mnt, task->role);
52379 +
52380 + task_lock(task);
52381 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52382 + !(task->role->roletype & GR_ROLE_GOD) &&
52383 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52384 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52385 + task_unlock(task);
52386 + if (unsafe_flags & LSM_UNSAFE_SHARE)
52387 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52388 + else
52389 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52390 + return -EACCES;
52391 + }
52392 + task_unlock(task);
52393 +
52394 + obj = chk_obj_label(dentry, mnt, task->acl);
52395 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52396 +
52397 + if (!(task->acl->mode & GR_INHERITLEARN) &&
52398 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52399 + if (obj->nested)
52400 + task->acl = obj->nested;
52401 + else
52402 + task->acl = newacl;
52403 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52404 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52405 +
52406 + task->is_writable = 0;
52407 +
52408 + /* ignore additional mmap checks for processes that are writable
52409 + by the default ACL */
52410 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
52411 + if (unlikely(obj->mode & GR_WRITE))
52412 + task->is_writable = 1;
52413 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
52414 + if (unlikely(obj->mode & GR_WRITE))
52415 + task->is_writable = 1;
52416 +
52417 + gr_set_proc_res(task);
52418 +
52419 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52420 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52421 +#endif
52422 + return 0;
52423 +}
52424 +
52425 +/* always called with valid inodev ptr */
52426 +static void
52427 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52428 +{
52429 + struct acl_object_label *matchpo;
52430 + struct acl_subject_label *matchps;
52431 + struct acl_subject_label *subj;
52432 + struct acl_role_label *role;
52433 + unsigned int x;
52434 +
52435 + FOR_EACH_ROLE_START(role)
52436 + FOR_EACH_SUBJECT_START(role, subj, x)
52437 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52438 + matchpo->mode |= GR_DELETED;
52439 + FOR_EACH_SUBJECT_END(subj,x)
52440 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52441 + if (subj->inode == ino && subj->device == dev)
52442 + subj->mode |= GR_DELETED;
52443 + FOR_EACH_NESTED_SUBJECT_END(subj)
52444 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52445 + matchps->mode |= GR_DELETED;
52446 + FOR_EACH_ROLE_END(role)
52447 +
52448 + inodev->nentry->deleted = 1;
52449 +
52450 + return;
52451 +}
52452 +
52453 +void
52454 +gr_handle_delete(const ino_t ino, const dev_t dev)
52455 +{
52456 + struct inodev_entry *inodev;
52457 +
52458 + if (unlikely(!(gr_status & GR_READY)))
52459 + return;
52460 +
52461 + write_lock(&gr_inode_lock);
52462 + inodev = lookup_inodev_entry(ino, dev);
52463 + if (inodev != NULL)
52464 + do_handle_delete(inodev, ino, dev);
52465 + write_unlock(&gr_inode_lock);
52466 +
52467 + return;
52468 +}
52469 +
52470 +static void
52471 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52472 + const ino_t newinode, const dev_t newdevice,
52473 + struct acl_subject_label *subj)
52474 +{
52475 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52476 + struct acl_object_label *match;
52477 +
52478 + match = subj->obj_hash[index];
52479 +
52480 + while (match && (match->inode != oldinode ||
52481 + match->device != olddevice ||
52482 + !(match->mode & GR_DELETED)))
52483 + match = match->next;
52484 +
52485 + if (match && (match->inode == oldinode)
52486 + && (match->device == olddevice)
52487 + && (match->mode & GR_DELETED)) {
52488 + if (match->prev == NULL) {
52489 + subj->obj_hash[index] = match->next;
52490 + if (match->next != NULL)
52491 + match->next->prev = NULL;
52492 + } else {
52493 + match->prev->next = match->next;
52494 + if (match->next != NULL)
52495 + match->next->prev = match->prev;
52496 + }
52497 + match->prev = NULL;
52498 + match->next = NULL;
52499 + match->inode = newinode;
52500 + match->device = newdevice;
52501 + match->mode &= ~GR_DELETED;
52502 +
52503 + insert_acl_obj_label(match, subj);
52504 + }
52505 +
52506 + return;
52507 +}
52508 +
52509 +static void
52510 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52511 + const ino_t newinode, const dev_t newdevice,
52512 + struct acl_role_label *role)
52513 +{
52514 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52515 + struct acl_subject_label *match;
52516 +
52517 + match = role->subj_hash[index];
52518 +
52519 + while (match && (match->inode != oldinode ||
52520 + match->device != olddevice ||
52521 + !(match->mode & GR_DELETED)))
52522 + match = match->next;
52523 +
52524 + if (match && (match->inode == oldinode)
52525 + && (match->device == olddevice)
52526 + && (match->mode & GR_DELETED)) {
52527 + if (match->prev == NULL) {
52528 + role->subj_hash[index] = match->next;
52529 + if (match->next != NULL)
52530 + match->next->prev = NULL;
52531 + } else {
52532 + match->prev->next = match->next;
52533 + if (match->next != NULL)
52534 + match->next->prev = match->prev;
52535 + }
52536 + match->prev = NULL;
52537 + match->next = NULL;
52538 + match->inode = newinode;
52539 + match->device = newdevice;
52540 + match->mode &= ~GR_DELETED;
52541 +
52542 + insert_acl_subj_label(match, role);
52543 + }
52544 +
52545 + return;
52546 +}
52547 +
52548 +static void
52549 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52550 + const ino_t newinode, const dev_t newdevice)
52551 +{
52552 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52553 + struct inodev_entry *match;
52554 +
52555 + match = inodev_set.i_hash[index];
52556 +
52557 + while (match && (match->nentry->inode != oldinode ||
52558 + match->nentry->device != olddevice || !match->nentry->deleted))
52559 + match = match->next;
52560 +
52561 + if (match && (match->nentry->inode == oldinode)
52562 + && (match->nentry->device == olddevice) &&
52563 + match->nentry->deleted) {
52564 + if (match->prev == NULL) {
52565 + inodev_set.i_hash[index] = match->next;
52566 + if (match->next != NULL)
52567 + match->next->prev = NULL;
52568 + } else {
52569 + match->prev->next = match->next;
52570 + if (match->next != NULL)
52571 + match->next->prev = match->prev;
52572 + }
52573 + match->prev = NULL;
52574 + match->next = NULL;
52575 + match->nentry->inode = newinode;
52576 + match->nentry->device = newdevice;
52577 + match->nentry->deleted = 0;
52578 +
52579 + insert_inodev_entry(match);
52580 + }
52581 +
52582 + return;
52583 +}
52584 +
52585 +static void
52586 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52587 +{
52588 + struct acl_subject_label *subj;
52589 + struct acl_role_label *role;
52590 + unsigned int x;
52591 +
52592 + FOR_EACH_ROLE_START(role)
52593 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52594 +
52595 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52596 + if ((subj->inode == ino) && (subj->device == dev)) {
52597 + subj->inode = ino;
52598 + subj->device = dev;
52599 + }
52600 + FOR_EACH_NESTED_SUBJECT_END(subj)
52601 + FOR_EACH_SUBJECT_START(role, subj, x)
52602 + update_acl_obj_label(matchn->inode, matchn->device,
52603 + ino, dev, subj);
52604 + FOR_EACH_SUBJECT_END(subj,x)
52605 + FOR_EACH_ROLE_END(role)
52606 +
52607 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52608 +
52609 + return;
52610 +}
52611 +
52612 +static void
52613 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52614 + const struct vfsmount *mnt)
52615 +{
52616 + ino_t ino = dentry->d_inode->i_ino;
52617 + dev_t dev = __get_dev(dentry);
52618 +
52619 + __do_handle_create(matchn, ino, dev);
52620 +
52621 + return;
52622 +}
52623 +
52624 +void
52625 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52626 +{
52627 + struct name_entry *matchn;
52628 +
52629 + if (unlikely(!(gr_status & GR_READY)))
52630 + return;
52631 +
52632 + preempt_disable();
52633 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52634 +
52635 + if (unlikely((unsigned long)matchn)) {
52636 + write_lock(&gr_inode_lock);
52637 + do_handle_create(matchn, dentry, mnt);
52638 + write_unlock(&gr_inode_lock);
52639 + }
52640 + preempt_enable();
52641 +
52642 + return;
52643 +}
52644 +
52645 +void
52646 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52647 +{
52648 + struct name_entry *matchn;
52649 +
52650 + if (unlikely(!(gr_status & GR_READY)))
52651 + return;
52652 +
52653 + preempt_disable();
52654 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52655 +
52656 + if (unlikely((unsigned long)matchn)) {
52657 + write_lock(&gr_inode_lock);
52658 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52659 + write_unlock(&gr_inode_lock);
52660 + }
52661 + preempt_enable();
52662 +
52663 + return;
52664 +}
52665 +
52666 +void
52667 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52668 + struct dentry *old_dentry,
52669 + struct dentry *new_dentry,
52670 + struct vfsmount *mnt, const __u8 replace)
52671 +{
52672 + struct name_entry *matchn;
52673 + struct inodev_entry *inodev;
52674 + struct inode *inode = new_dentry->d_inode;
52675 + ino_t old_ino = old_dentry->d_inode->i_ino;
52676 + dev_t old_dev = __get_dev(old_dentry);
52677 +
52678 + /* vfs_rename swaps the name and parent link for old_dentry and
52679 + new_dentry
52680 + at this point, old_dentry has the new name, parent link, and inode
52681 + for the renamed file
52682 + if a file is being replaced by a rename, new_dentry has the inode
52683 + and name for the replaced file
52684 + */
52685 +
52686 + if (unlikely(!(gr_status & GR_READY)))
52687 + return;
52688 +
52689 + preempt_disable();
52690 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
52691 +
52692 + /* we wouldn't have to check d_inode if it weren't for
52693 + NFS silly-renaming
52694 + */
52695 +
52696 + write_lock(&gr_inode_lock);
52697 + if (unlikely(replace && inode)) {
52698 + ino_t new_ino = inode->i_ino;
52699 + dev_t new_dev = __get_dev(new_dentry);
52700 +
52701 + inodev = lookup_inodev_entry(new_ino, new_dev);
52702 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
52703 + do_handle_delete(inodev, new_ino, new_dev);
52704 + }
52705 +
52706 + inodev = lookup_inodev_entry(old_ino, old_dev);
52707 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
52708 + do_handle_delete(inodev, old_ino, old_dev);
52709 +
52710 + if (unlikely((unsigned long)matchn))
52711 + do_handle_create(matchn, old_dentry, mnt);
52712 +
52713 + write_unlock(&gr_inode_lock);
52714 + preempt_enable();
52715 +
52716 + return;
52717 +}
52718 +
52719 +static int
52720 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
52721 + unsigned char **sum)
52722 +{
52723 + struct acl_role_label *r;
52724 + struct role_allowed_ip *ipp;
52725 + struct role_transition *trans;
52726 + unsigned int i;
52727 + int found = 0;
52728 + u32 curr_ip = current->signal->curr_ip;
52729 +
52730 + current->signal->saved_ip = curr_ip;
52731 +
52732 + /* check transition table */
52733 +
52734 + for (trans = current->role->transitions; trans; trans = trans->next) {
52735 + if (!strcmp(rolename, trans->rolename)) {
52736 + found = 1;
52737 + break;
52738 + }
52739 + }
52740 +
52741 + if (!found)
52742 + return 0;
52743 +
52744 + /* handle special roles that do not require authentication
52745 + and check ip */
52746 +
52747 + FOR_EACH_ROLE_START(r)
52748 + if (!strcmp(rolename, r->rolename) &&
52749 + (r->roletype & GR_ROLE_SPECIAL)) {
52750 + found = 0;
52751 + if (r->allowed_ips != NULL) {
52752 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
52753 + if ((ntohl(curr_ip) & ipp->netmask) ==
52754 + (ntohl(ipp->addr) & ipp->netmask))
52755 + found = 1;
52756 + }
52757 + } else
52758 + found = 2;
52759 + if (!found)
52760 + return 0;
52761 +
52762 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
52763 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
52764 + *salt = NULL;
52765 + *sum = NULL;
52766 + return 1;
52767 + }
52768 + }
52769 + FOR_EACH_ROLE_END(r)
52770 +
52771 + for (i = 0; i < num_sprole_pws; i++) {
52772 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
52773 + *salt = acl_special_roles[i]->salt;
52774 + *sum = acl_special_roles[i]->sum;
52775 + return 1;
52776 + }
52777 + }
52778 +
52779 + return 0;
52780 +}
52781 +
52782 +static void
52783 +assign_special_role(char *rolename)
52784 +{
52785 + struct acl_object_label *obj;
52786 + struct acl_role_label *r;
52787 + struct acl_role_label *assigned = NULL;
52788 + struct task_struct *tsk;
52789 + struct file *filp;
52790 +
52791 + FOR_EACH_ROLE_START(r)
52792 + if (!strcmp(rolename, r->rolename) &&
52793 + (r->roletype & GR_ROLE_SPECIAL)) {
52794 + assigned = r;
52795 + break;
52796 + }
52797 + FOR_EACH_ROLE_END(r)
52798 +
52799 + if (!assigned)
52800 + return;
52801 +
52802 + read_lock(&tasklist_lock);
52803 + read_lock(&grsec_exec_file_lock);
52804 +
52805 + tsk = current->real_parent;
52806 + if (tsk == NULL)
52807 + goto out_unlock;
52808 +
52809 + filp = tsk->exec_file;
52810 + if (filp == NULL)
52811 + goto out_unlock;
52812 +
52813 + tsk->is_writable = 0;
52814 +
52815 + tsk->acl_sp_role = 1;
52816 + tsk->acl_role_id = ++acl_sp_role_value;
52817 + tsk->role = assigned;
52818 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
52819 +
52820 + /* ignore additional mmap checks for processes that are writable
52821 + by the default ACL */
52822 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52823 + if (unlikely(obj->mode & GR_WRITE))
52824 + tsk->is_writable = 1;
52825 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
52826 + if (unlikely(obj->mode & GR_WRITE))
52827 + tsk->is_writable = 1;
52828 +
52829 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52830 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
52831 +#endif
52832 +
52833 +out_unlock:
52834 + read_unlock(&grsec_exec_file_lock);
52835 + read_unlock(&tasklist_lock);
52836 + return;
52837 +}
52838 +
52839 +int gr_check_secure_terminal(struct task_struct *task)
52840 +{
52841 + struct task_struct *p, *p2, *p3;
52842 + struct files_struct *files;
52843 + struct fdtable *fdt;
52844 + struct file *our_file = NULL, *file;
52845 + int i;
52846 +
52847 + if (task->signal->tty == NULL)
52848 + return 1;
52849 +
52850 + files = get_files_struct(task);
52851 + if (files != NULL) {
52852 + rcu_read_lock();
52853 + fdt = files_fdtable(files);
52854 + for (i=0; i < fdt->max_fds; i++) {
52855 + file = fcheck_files(files, i);
52856 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
52857 + get_file(file);
52858 + our_file = file;
52859 + }
52860 + }
52861 + rcu_read_unlock();
52862 + put_files_struct(files);
52863 + }
52864 +
52865 + if (our_file == NULL)
52866 + return 1;
52867 +
52868 + read_lock(&tasklist_lock);
52869 + do_each_thread(p2, p) {
52870 + files = get_files_struct(p);
52871 + if (files == NULL ||
52872 + (p->signal && p->signal->tty == task->signal->tty)) {
52873 + if (files != NULL)
52874 + put_files_struct(files);
52875 + continue;
52876 + }
52877 + rcu_read_lock();
52878 + fdt = files_fdtable(files);
52879 + for (i=0; i < fdt->max_fds; i++) {
52880 + file = fcheck_files(files, i);
52881 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
52882 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
52883 + p3 = task;
52884 + while (p3->pid > 0) {
52885 + if (p3 == p)
52886 + break;
52887 + p3 = p3->real_parent;
52888 + }
52889 + if (p3 == p)
52890 + break;
52891 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
52892 + gr_handle_alertkill(p);
52893 + rcu_read_unlock();
52894 + put_files_struct(files);
52895 + read_unlock(&tasklist_lock);
52896 + fput(our_file);
52897 + return 0;
52898 + }
52899 + }
52900 + rcu_read_unlock();
52901 + put_files_struct(files);
52902 + } while_each_thread(p2, p);
52903 + read_unlock(&tasklist_lock);
52904 +
52905 + fput(our_file);
52906 + return 1;
52907 +}
52908 +
52909 +ssize_t
52910 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
52911 +{
52912 + struct gr_arg_wrapper uwrap;
52913 + unsigned char *sprole_salt = NULL;
52914 + unsigned char *sprole_sum = NULL;
52915 + int error = sizeof (struct gr_arg_wrapper);
52916 + int error2 = 0;
52917 +
52918 + mutex_lock(&gr_dev_mutex);
52919 +
52920 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
52921 + error = -EPERM;
52922 + goto out;
52923 + }
52924 +
52925 + if (count != sizeof (struct gr_arg_wrapper)) {
52926 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
52927 + error = -EINVAL;
52928 + goto out;
52929 + }
52930 +
52931 +
52932 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
52933 + gr_auth_expires = 0;
52934 + gr_auth_attempts = 0;
52935 + }
52936 +
52937 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
52938 + error = -EFAULT;
52939 + goto out;
52940 + }
52941 +
52942 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
52943 + error = -EINVAL;
52944 + goto out;
52945 + }
52946 +
52947 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
52948 + error = -EFAULT;
52949 + goto out;
52950 + }
52951 +
52952 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
52953 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
52954 + time_after(gr_auth_expires, get_seconds())) {
52955 + error = -EBUSY;
52956 + goto out;
52957 + }
52958 +
52959 + /* if non-root trying to do anything other than use a special role,
52960 + do not attempt authentication, do not count towards authentication
52961 + locking
52962 + */
52963 +
52964 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
52965 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
52966 + current_uid()) {
52967 + error = -EPERM;
52968 + goto out;
52969 + }
52970 +
52971 + /* ensure pw and special role name are null terminated */
52972 +
52973 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
52974 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
52975 +
52976 + /* Okay.
52977 + * We have our enough of the argument structure..(we have yet
52978 + * to copy_from_user the tables themselves) . Copy the tables
52979 + * only if we need them, i.e. for loading operations. */
52980 +
52981 + switch (gr_usermode->mode) {
52982 + case GR_STATUS:
52983 + if (gr_status & GR_READY) {
52984 + error = 1;
52985 + if (!gr_check_secure_terminal(current))
52986 + error = 3;
52987 + } else
52988 + error = 2;
52989 + goto out;
52990 + case GR_SHUTDOWN:
52991 + if ((gr_status & GR_READY)
52992 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
52993 + pax_open_kernel();
52994 + gr_status &= ~GR_READY;
52995 + pax_close_kernel();
52996 +
52997 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
52998 + free_variables();
52999 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53000 + memset(gr_system_salt, 0, GR_SALT_LEN);
53001 + memset(gr_system_sum, 0, GR_SHA_LEN);
53002 + } else if (gr_status & GR_READY) {
53003 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53004 + error = -EPERM;
53005 + } else {
53006 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53007 + error = -EAGAIN;
53008 + }
53009 + break;
53010 + case GR_ENABLE:
53011 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53012 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53013 + else {
53014 + if (gr_status & GR_READY)
53015 + error = -EAGAIN;
53016 + else
53017 + error = error2;
53018 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53019 + }
53020 + break;
53021 + case GR_RELOAD:
53022 + if (!(gr_status & GR_READY)) {
53023 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53024 + error = -EAGAIN;
53025 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53026 + preempt_disable();
53027 +
53028 + pax_open_kernel();
53029 + gr_status &= ~GR_READY;
53030 + pax_close_kernel();
53031 +
53032 + free_variables();
53033 + if (!(error2 = gracl_init(gr_usermode))) {
53034 + preempt_enable();
53035 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53036 + } else {
53037 + preempt_enable();
53038 + error = error2;
53039 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53040 + }
53041 + } else {
53042 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53043 + error = -EPERM;
53044 + }
53045 + break;
53046 + case GR_SEGVMOD:
53047 + if (unlikely(!(gr_status & GR_READY))) {
53048 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53049 + error = -EAGAIN;
53050 + break;
53051 + }
53052 +
53053 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53054 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53055 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53056 + struct acl_subject_label *segvacl;
53057 + segvacl =
53058 + lookup_acl_subj_label(gr_usermode->segv_inode,
53059 + gr_usermode->segv_device,
53060 + current->role);
53061 + if (segvacl) {
53062 + segvacl->crashes = 0;
53063 + segvacl->expires = 0;
53064 + }
53065 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53066 + gr_remove_uid(gr_usermode->segv_uid);
53067 + }
53068 + } else {
53069 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53070 + error = -EPERM;
53071 + }
53072 + break;
53073 + case GR_SPROLE:
53074 + case GR_SPROLEPAM:
53075 + if (unlikely(!(gr_status & GR_READY))) {
53076 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53077 + error = -EAGAIN;
53078 + break;
53079 + }
53080 +
53081 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53082 + current->role->expires = 0;
53083 + current->role->auth_attempts = 0;
53084 + }
53085 +
53086 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53087 + time_after(current->role->expires, get_seconds())) {
53088 + error = -EBUSY;
53089 + goto out;
53090 + }
53091 +
53092 + if (lookup_special_role_auth
53093 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53094 + && ((!sprole_salt && !sprole_sum)
53095 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53096 + char *p = "";
53097 + assign_special_role(gr_usermode->sp_role);
53098 + read_lock(&tasklist_lock);
53099 + if (current->real_parent)
53100 + p = current->real_parent->role->rolename;
53101 + read_unlock(&tasklist_lock);
53102 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53103 + p, acl_sp_role_value);
53104 + } else {
53105 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53106 + error = -EPERM;
53107 + if(!(current->role->auth_attempts++))
53108 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53109 +
53110 + goto out;
53111 + }
53112 + break;
53113 + case GR_UNSPROLE:
53114 + if (unlikely(!(gr_status & GR_READY))) {
53115 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53116 + error = -EAGAIN;
53117 + break;
53118 + }
53119 +
53120 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53121 + char *p = "";
53122 + int i = 0;
53123 +
53124 + read_lock(&tasklist_lock);
53125 + if (current->real_parent) {
53126 + p = current->real_parent->role->rolename;
53127 + i = current->real_parent->acl_role_id;
53128 + }
53129 + read_unlock(&tasklist_lock);
53130 +
53131 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53132 + gr_set_acls(1);
53133 + } else {
53134 + error = -EPERM;
53135 + goto out;
53136 + }
53137 + break;
53138 + default:
53139 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53140 + error = -EINVAL;
53141 + break;
53142 + }
53143 +
53144 + if (error != -EPERM)
53145 + goto out;
53146 +
53147 + if(!(gr_auth_attempts++))
53148 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53149 +
53150 + out:
53151 + mutex_unlock(&gr_dev_mutex);
53152 + return error;
53153 +}
53154 +
53155 +/* must be called with
53156 + rcu_read_lock();
53157 + read_lock(&tasklist_lock);
53158 + read_lock(&grsec_exec_file_lock);
53159 +*/
53160 +int gr_apply_subject_to_task(struct task_struct *task)
53161 +{
53162 + struct acl_object_label *obj;
53163 + char *tmpname;
53164 + struct acl_subject_label *tmpsubj;
53165 + struct file *filp;
53166 + struct name_entry *nmatch;
53167 +
53168 + filp = task->exec_file;
53169 + if (filp == NULL)
53170 + return 0;
53171 +
53172 + /* the following is to apply the correct subject
53173 + on binaries running when the RBAC system
53174 + is enabled, when the binaries have been
53175 + replaced or deleted since their execution
53176 + -----
53177 + when the RBAC system starts, the inode/dev
53178 + from exec_file will be one the RBAC system
53179 + is unaware of. It only knows the inode/dev
53180 + of the present file on disk, or the absence
53181 + of it.
53182 + */
53183 + preempt_disable();
53184 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53185 +
53186 + nmatch = lookup_name_entry(tmpname);
53187 + preempt_enable();
53188 + tmpsubj = NULL;
53189 + if (nmatch) {
53190 + if (nmatch->deleted)
53191 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53192 + else
53193 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53194 + if (tmpsubj != NULL)
53195 + task->acl = tmpsubj;
53196 + }
53197 + if (tmpsubj == NULL)
53198 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53199 + task->role);
53200 + if (task->acl) {
53201 + task->is_writable = 0;
53202 + /* ignore additional mmap checks for processes that are writable
53203 + by the default ACL */
53204 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53205 + if (unlikely(obj->mode & GR_WRITE))
53206 + task->is_writable = 1;
53207 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53208 + if (unlikely(obj->mode & GR_WRITE))
53209 + task->is_writable = 1;
53210 +
53211 + gr_set_proc_res(task);
53212 +
53213 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53214 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53215 +#endif
53216 + } else {
53217 + return 1;
53218 + }
53219 +
53220 + return 0;
53221 +}
53222 +
53223 +int
53224 +gr_set_acls(const int type)
53225 +{
53226 + struct task_struct *task, *task2;
53227 + struct acl_role_label *role = current->role;
53228 + __u16 acl_role_id = current->acl_role_id;
53229 + const struct cred *cred;
53230 + int ret;
53231 +
53232 + rcu_read_lock();
53233 + read_lock(&tasklist_lock);
53234 + read_lock(&grsec_exec_file_lock);
53235 + do_each_thread(task2, task) {
53236 + /* check to see if we're called from the exit handler,
53237 + if so, only replace ACLs that have inherited the admin
53238 + ACL */
53239 +
53240 + if (type && (task->role != role ||
53241 + task->acl_role_id != acl_role_id))
53242 + continue;
53243 +
53244 + task->acl_role_id = 0;
53245 + task->acl_sp_role = 0;
53246 +
53247 + if (task->exec_file) {
53248 + cred = __task_cred(task);
53249 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53250 + ret = gr_apply_subject_to_task(task);
53251 + if (ret) {
53252 + read_unlock(&grsec_exec_file_lock);
53253 + read_unlock(&tasklist_lock);
53254 + rcu_read_unlock();
53255 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53256 + return ret;
53257 + }
53258 + } else {
53259 + // it's a kernel process
53260 + task->role = kernel_role;
53261 + task->acl = kernel_role->root_label;
53262 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53263 + task->acl->mode &= ~GR_PROCFIND;
53264 +#endif
53265 + }
53266 + } while_each_thread(task2, task);
53267 + read_unlock(&grsec_exec_file_lock);
53268 + read_unlock(&tasklist_lock);
53269 + rcu_read_unlock();
53270 +
53271 + return 0;
53272 +}
53273 +
53274 +void
53275 +gr_learn_resource(const struct task_struct *task,
53276 + const int res, const unsigned long wanted, const int gt)
53277 +{
53278 + struct acl_subject_label *acl;
53279 + const struct cred *cred;
53280 +
53281 + if (unlikely((gr_status & GR_READY) &&
53282 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53283 + goto skip_reslog;
53284 +
53285 +#ifdef CONFIG_GRKERNSEC_RESLOG
53286 + gr_log_resource(task, res, wanted, gt);
53287 +#endif
53288 + skip_reslog:
53289 +
53290 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53291 + return;
53292 +
53293 + acl = task->acl;
53294 +
53295 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53296 + !(acl->resmask & (1 << (unsigned short) res))))
53297 + return;
53298 +
53299 + if (wanted >= acl->res[res].rlim_cur) {
53300 + unsigned long res_add;
53301 +
53302 + res_add = wanted;
53303 + switch (res) {
53304 + case RLIMIT_CPU:
53305 + res_add += GR_RLIM_CPU_BUMP;
53306 + break;
53307 + case RLIMIT_FSIZE:
53308 + res_add += GR_RLIM_FSIZE_BUMP;
53309 + break;
53310 + case RLIMIT_DATA:
53311 + res_add += GR_RLIM_DATA_BUMP;
53312 + break;
53313 + case RLIMIT_STACK:
53314 + res_add += GR_RLIM_STACK_BUMP;
53315 + break;
53316 + case RLIMIT_CORE:
53317 + res_add += GR_RLIM_CORE_BUMP;
53318 + break;
53319 + case RLIMIT_RSS:
53320 + res_add += GR_RLIM_RSS_BUMP;
53321 + break;
53322 + case RLIMIT_NPROC:
53323 + res_add += GR_RLIM_NPROC_BUMP;
53324 + break;
53325 + case RLIMIT_NOFILE:
53326 + res_add += GR_RLIM_NOFILE_BUMP;
53327 + break;
53328 + case RLIMIT_MEMLOCK:
53329 + res_add += GR_RLIM_MEMLOCK_BUMP;
53330 + break;
53331 + case RLIMIT_AS:
53332 + res_add += GR_RLIM_AS_BUMP;
53333 + break;
53334 + case RLIMIT_LOCKS:
53335 + res_add += GR_RLIM_LOCKS_BUMP;
53336 + break;
53337 + case RLIMIT_SIGPENDING:
53338 + res_add += GR_RLIM_SIGPENDING_BUMP;
53339 + break;
53340 + case RLIMIT_MSGQUEUE:
53341 + res_add += GR_RLIM_MSGQUEUE_BUMP;
53342 + break;
53343 + case RLIMIT_NICE:
53344 + res_add += GR_RLIM_NICE_BUMP;
53345 + break;
53346 + case RLIMIT_RTPRIO:
53347 + res_add += GR_RLIM_RTPRIO_BUMP;
53348 + break;
53349 + case RLIMIT_RTTIME:
53350 + res_add += GR_RLIM_RTTIME_BUMP;
53351 + break;
53352 + }
53353 +
53354 + acl->res[res].rlim_cur = res_add;
53355 +
53356 + if (wanted > acl->res[res].rlim_max)
53357 + acl->res[res].rlim_max = res_add;
53358 +
53359 + /* only log the subject filename, since resource logging is supported for
53360 + single-subject learning only */
53361 + rcu_read_lock();
53362 + cred = __task_cred(task);
53363 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53364 + task->role->roletype, cred->uid, cred->gid, acl->filename,
53365 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53366 + "", (unsigned long) res, &task->signal->saved_ip);
53367 + rcu_read_unlock();
53368 + }
53369 +
53370 + return;
53371 +}
53372 +
53373 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53374 +void
53375 +pax_set_initial_flags(struct linux_binprm *bprm)
53376 +{
53377 + struct task_struct *task = current;
53378 + struct acl_subject_label *proc;
53379 + unsigned long flags;
53380 +
53381 + if (unlikely(!(gr_status & GR_READY)))
53382 + return;
53383 +
53384 + flags = pax_get_flags(task);
53385 +
53386 + proc = task->acl;
53387 +
53388 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53389 + flags &= ~MF_PAX_PAGEEXEC;
53390 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53391 + flags &= ~MF_PAX_SEGMEXEC;
53392 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53393 + flags &= ~MF_PAX_RANDMMAP;
53394 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53395 + flags &= ~MF_PAX_EMUTRAMP;
53396 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53397 + flags &= ~MF_PAX_MPROTECT;
53398 +
53399 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53400 + flags |= MF_PAX_PAGEEXEC;
53401 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53402 + flags |= MF_PAX_SEGMEXEC;
53403 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53404 + flags |= MF_PAX_RANDMMAP;
53405 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53406 + flags |= MF_PAX_EMUTRAMP;
53407 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53408 + flags |= MF_PAX_MPROTECT;
53409 +
53410 + pax_set_flags(task, flags);
53411 +
53412 + return;
53413 +}
53414 +#endif
53415 +
53416 +#ifdef CONFIG_SYSCTL
53417 +/* Eric Biederman likes breaking userland ABI and every inode-based security
53418 + system to save 35kb of memory */
53419 +
53420 +/* we modify the passed in filename, but adjust it back before returning */
53421 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
53422 +{
53423 + struct name_entry *nmatch;
53424 + char *p, *lastp = NULL;
53425 + struct acl_object_label *obj = NULL, *tmp;
53426 + struct acl_subject_label *tmpsubj;
53427 + char c = '\0';
53428 +
53429 + read_lock(&gr_inode_lock);
53430 +
53431 + p = name + len - 1;
53432 + do {
53433 + nmatch = lookup_name_entry(name);
53434 + if (lastp != NULL)
53435 + *lastp = c;
53436 +
53437 + if (nmatch == NULL)
53438 + goto next_component;
53439 + tmpsubj = current->acl;
53440 + do {
53441 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
53442 + if (obj != NULL) {
53443 + tmp = obj->globbed;
53444 + while (tmp) {
53445 + if (!glob_match(tmp->filename, name)) {
53446 + obj = tmp;
53447 + goto found_obj;
53448 + }
53449 + tmp = tmp->next;
53450 + }
53451 + goto found_obj;
53452 + }
53453 + } while ((tmpsubj = tmpsubj->parent_subject));
53454 +next_component:
53455 + /* end case */
53456 + if (p == name)
53457 + break;
53458 +
53459 + while (*p != '/')
53460 + p--;
53461 + if (p == name)
53462 + lastp = p + 1;
53463 + else {
53464 + lastp = p;
53465 + p--;
53466 + }
53467 + c = *lastp;
53468 + *lastp = '\0';
53469 + } while (1);
53470 +found_obj:
53471 + read_unlock(&gr_inode_lock);
53472 + /* obj returned will always be non-null */
53473 + return obj;
53474 +}
53475 +
53476 +/* returns 0 when allowing, non-zero on error
53477 + op of 0 is used for readdir, so we don't log the names of hidden files
53478 +*/
53479 +__u32
53480 +gr_handle_sysctl(const struct ctl_table *table, const int op)
53481 +{
53482 + struct ctl_table *tmp;
53483 + const char *proc_sys = "/proc/sys";
53484 + char *path;
53485 + struct acl_object_label *obj;
53486 + unsigned short len = 0, pos = 0, depth = 0, i;
53487 + __u32 err = 0;
53488 + __u32 mode = 0;
53489 +
53490 + if (unlikely(!(gr_status & GR_READY)))
53491 + return 0;
53492 +
53493 + /* for now, ignore operations on non-sysctl entries if it's not a
53494 + readdir*/
53495 + if (table->child != NULL && op != 0)
53496 + return 0;
53497 +
53498 + mode |= GR_FIND;
53499 + /* it's only a read if it's an entry, read on dirs is for readdir */
53500 + if (op & MAY_READ)
53501 + mode |= GR_READ;
53502 + if (op & MAY_WRITE)
53503 + mode |= GR_WRITE;
53504 +
53505 + preempt_disable();
53506 +
53507 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53508 +
53509 + /* it's only a read/write if it's an actual entry, not a dir
53510 + (which are opened for readdir)
53511 + */
53512 +
53513 + /* convert the requested sysctl entry into a pathname */
53514 +
53515 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53516 + len += strlen(tmp->procname);
53517 + len++;
53518 + depth++;
53519 + }
53520 +
53521 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
53522 + /* deny */
53523 + goto out;
53524 + }
53525 +
53526 + memset(path, 0, PAGE_SIZE);
53527 +
53528 + memcpy(path, proc_sys, strlen(proc_sys));
53529 +
53530 + pos += strlen(proc_sys);
53531 +
53532 + for (; depth > 0; depth--) {
53533 + path[pos] = '/';
53534 + pos++;
53535 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53536 + if (depth == i) {
53537 + memcpy(path + pos, tmp->procname,
53538 + strlen(tmp->procname));
53539 + pos += strlen(tmp->procname);
53540 + }
53541 + i++;
53542 + }
53543 + }
53544 +
53545 + obj = gr_lookup_by_name(path, pos);
53546 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
53547 +
53548 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
53549 + ((err & mode) != mode))) {
53550 + __u32 new_mode = mode;
53551 +
53552 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53553 +
53554 + err = 0;
53555 + gr_log_learn_sysctl(path, new_mode);
53556 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
53557 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
53558 + err = -ENOENT;
53559 + } else if (!(err & GR_FIND)) {
53560 + err = -ENOENT;
53561 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
53562 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
53563 + path, (mode & GR_READ) ? " reading" : "",
53564 + (mode & GR_WRITE) ? " writing" : "");
53565 + err = -EACCES;
53566 + } else if ((err & mode) != mode) {
53567 + err = -EACCES;
53568 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
53569 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
53570 + path, (mode & GR_READ) ? " reading" : "",
53571 + (mode & GR_WRITE) ? " writing" : "");
53572 + err = 0;
53573 + } else
53574 + err = 0;
53575 +
53576 + out:
53577 + preempt_enable();
53578 +
53579 + return err;
53580 +}
53581 +#endif
53582 +
53583 +int
53584 +gr_handle_proc_ptrace(struct task_struct *task)
53585 +{
53586 + struct file *filp;
53587 + struct task_struct *tmp = task;
53588 + struct task_struct *curtemp = current;
53589 + __u32 retmode;
53590 +
53591 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53592 + if (unlikely(!(gr_status & GR_READY)))
53593 + return 0;
53594 +#endif
53595 +
53596 + read_lock(&tasklist_lock);
53597 + read_lock(&grsec_exec_file_lock);
53598 + filp = task->exec_file;
53599 +
53600 + while (tmp->pid > 0) {
53601 + if (tmp == curtemp)
53602 + break;
53603 + tmp = tmp->real_parent;
53604 + }
53605 +
53606 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53607 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53608 + read_unlock(&grsec_exec_file_lock);
53609 + read_unlock(&tasklist_lock);
53610 + return 1;
53611 + }
53612 +
53613 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53614 + if (!(gr_status & GR_READY)) {
53615 + read_unlock(&grsec_exec_file_lock);
53616 + read_unlock(&tasklist_lock);
53617 + return 0;
53618 + }
53619 +#endif
53620 +
53621 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53622 + read_unlock(&grsec_exec_file_lock);
53623 + read_unlock(&tasklist_lock);
53624 +
53625 + if (retmode & GR_NOPTRACE)
53626 + return 1;
53627 +
53628 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53629 + && (current->acl != task->acl || (current->acl != current->role->root_label
53630 + && current->pid != task->pid)))
53631 + return 1;
53632 +
53633 + return 0;
53634 +}
53635 +
53636 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53637 +{
53638 + if (unlikely(!(gr_status & GR_READY)))
53639 + return;
53640 +
53641 + if (!(current->role->roletype & GR_ROLE_GOD))
53642 + return;
53643 +
53644 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53645 + p->role->rolename, gr_task_roletype_to_char(p),
53646 + p->acl->filename);
53647 +}
53648 +
53649 +int
53650 +gr_handle_ptrace(struct task_struct *task, const long request)
53651 +{
53652 + struct task_struct *tmp = task;
53653 + struct task_struct *curtemp = current;
53654 + __u32 retmode;
53655 +
53656 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53657 + if (unlikely(!(gr_status & GR_READY)))
53658 + return 0;
53659 +#endif
53660 +
53661 + read_lock(&tasklist_lock);
53662 + while (tmp->pid > 0) {
53663 + if (tmp == curtemp)
53664 + break;
53665 + tmp = tmp->real_parent;
53666 + }
53667 +
53668 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53669 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53670 + read_unlock(&tasklist_lock);
53671 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53672 + return 1;
53673 + }
53674 + read_unlock(&tasklist_lock);
53675 +
53676 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53677 + if (!(gr_status & GR_READY))
53678 + return 0;
53679 +#endif
53680 +
53681 + read_lock(&grsec_exec_file_lock);
53682 + if (unlikely(!task->exec_file)) {
53683 + read_unlock(&grsec_exec_file_lock);
53684 + return 0;
53685 + }
53686 +
53687 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53688 + read_unlock(&grsec_exec_file_lock);
53689 +
53690 + if (retmode & GR_NOPTRACE) {
53691 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53692 + return 1;
53693 + }
53694 +
53695 + if (retmode & GR_PTRACERD) {
53696 + switch (request) {
53697 + case PTRACE_SEIZE:
53698 + case PTRACE_POKETEXT:
53699 + case PTRACE_POKEDATA:
53700 + case PTRACE_POKEUSR:
53701 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53702 + case PTRACE_SETREGS:
53703 + case PTRACE_SETFPREGS:
53704 +#endif
53705 +#ifdef CONFIG_X86
53706 + case PTRACE_SETFPXREGS:
53707 +#endif
53708 +#ifdef CONFIG_ALTIVEC
53709 + case PTRACE_SETVRREGS:
53710 +#endif
53711 + return 1;
53712 + default:
53713 + return 0;
53714 + }
53715 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
53716 + !(current->role->roletype & GR_ROLE_GOD) &&
53717 + (current->acl != task->acl)) {
53718 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53719 + return 1;
53720 + }
53721 +
53722 + return 0;
53723 +}
53724 +
53725 +static int is_writable_mmap(const struct file *filp)
53726 +{
53727 + struct task_struct *task = current;
53728 + struct acl_object_label *obj, *obj2;
53729 +
53730 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53731 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53732 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53733 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53734 + task->role->root_label);
53735 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53736 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53737 + return 1;
53738 + }
53739 + }
53740 + return 0;
53741 +}
53742 +
53743 +int
53744 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53745 +{
53746 + __u32 mode;
53747 +
53748 + if (unlikely(!file || !(prot & PROT_EXEC)))
53749 + return 1;
53750 +
53751 + if (is_writable_mmap(file))
53752 + return 0;
53753 +
53754 + mode =
53755 + gr_search_file(file->f_path.dentry,
53756 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53757 + file->f_path.mnt);
53758 +
53759 + if (!gr_tpe_allow(file))
53760 + return 0;
53761 +
53762 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53763 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53764 + return 0;
53765 + } else if (unlikely(!(mode & GR_EXEC))) {
53766 + return 0;
53767 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53768 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53769 + return 1;
53770 + }
53771 +
53772 + return 1;
53773 +}
53774 +
53775 +int
53776 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53777 +{
53778 + __u32 mode;
53779 +
53780 + if (unlikely(!file || !(prot & PROT_EXEC)))
53781 + return 1;
53782 +
53783 + if (is_writable_mmap(file))
53784 + return 0;
53785 +
53786 + mode =
53787 + gr_search_file(file->f_path.dentry,
53788 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53789 + file->f_path.mnt);
53790 +
53791 + if (!gr_tpe_allow(file))
53792 + return 0;
53793 +
53794 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53795 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53796 + return 0;
53797 + } else if (unlikely(!(mode & GR_EXEC))) {
53798 + return 0;
53799 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53800 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53801 + return 1;
53802 + }
53803 +
53804 + return 1;
53805 +}
53806 +
53807 +void
53808 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53809 +{
53810 + unsigned long runtime;
53811 + unsigned long cputime;
53812 + unsigned int wday, cday;
53813 + __u8 whr, chr;
53814 + __u8 wmin, cmin;
53815 + __u8 wsec, csec;
53816 + struct timespec timeval;
53817 +
53818 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53819 + !(task->acl->mode & GR_PROCACCT)))
53820 + return;
53821 +
53822 + do_posix_clock_monotonic_gettime(&timeval);
53823 + runtime = timeval.tv_sec - task->start_time.tv_sec;
53824 + wday = runtime / (3600 * 24);
53825 + runtime -= wday * (3600 * 24);
53826 + whr = runtime / 3600;
53827 + runtime -= whr * 3600;
53828 + wmin = runtime / 60;
53829 + runtime -= wmin * 60;
53830 + wsec = runtime;
53831 +
53832 + cputime = (task->utime + task->stime) / HZ;
53833 + cday = cputime / (3600 * 24);
53834 + cputime -= cday * (3600 * 24);
53835 + chr = cputime / 3600;
53836 + cputime -= chr * 3600;
53837 + cmin = cputime / 60;
53838 + cputime -= cmin * 60;
53839 + csec = cputime;
53840 +
53841 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
53842 +
53843 + return;
53844 +}
53845 +
53846 +void gr_set_kernel_label(struct task_struct *task)
53847 +{
53848 + if (gr_status & GR_READY) {
53849 + task->role = kernel_role;
53850 + task->acl = kernel_role->root_label;
53851 + }
53852 + return;
53853 +}
53854 +
53855 +#ifdef CONFIG_TASKSTATS
53856 +int gr_is_taskstats_denied(int pid)
53857 +{
53858 + struct task_struct *task;
53859 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53860 + const struct cred *cred;
53861 +#endif
53862 + int ret = 0;
53863 +
53864 + /* restrict taskstats viewing to un-chrooted root users
53865 + who have the 'view' subject flag if the RBAC system is enabled
53866 + */
53867 +
53868 + rcu_read_lock();
53869 + read_lock(&tasklist_lock);
53870 + task = find_task_by_vpid(pid);
53871 + if (task) {
53872 +#ifdef CONFIG_GRKERNSEC_CHROOT
53873 + if (proc_is_chrooted(task))
53874 + ret = -EACCES;
53875 +#endif
53876 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53877 + cred = __task_cred(task);
53878 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53879 + if (cred->uid != 0)
53880 + ret = -EACCES;
53881 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53882 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
53883 + ret = -EACCES;
53884 +#endif
53885 +#endif
53886 + if (gr_status & GR_READY) {
53887 + if (!(task->acl->mode & GR_VIEW))
53888 + ret = -EACCES;
53889 + }
53890 + } else
53891 + ret = -ENOENT;
53892 +
53893 + read_unlock(&tasklist_lock);
53894 + rcu_read_unlock();
53895 +
53896 + return ret;
53897 +}
53898 +#endif
53899 +
53900 +/* AUXV entries are filled via a descendant of search_binary_handler
53901 + after we've already applied the subject for the target
53902 +*/
53903 +int gr_acl_enable_at_secure(void)
53904 +{
53905 + if (unlikely(!(gr_status & GR_READY)))
53906 + return 0;
53907 +
53908 + if (current->acl->mode & GR_ATSECURE)
53909 + return 1;
53910 +
53911 + return 0;
53912 +}
53913 +
53914 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
53915 +{
53916 + struct task_struct *task = current;
53917 + struct dentry *dentry = file->f_path.dentry;
53918 + struct vfsmount *mnt = file->f_path.mnt;
53919 + struct acl_object_label *obj, *tmp;
53920 + struct acl_subject_label *subj;
53921 + unsigned int bufsize;
53922 + int is_not_root;
53923 + char *path;
53924 + dev_t dev = __get_dev(dentry);
53925 +
53926 + if (unlikely(!(gr_status & GR_READY)))
53927 + return 1;
53928 +
53929 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53930 + return 1;
53931 +
53932 + /* ignore Eric Biederman */
53933 + if (IS_PRIVATE(dentry->d_inode))
53934 + return 1;
53935 +
53936 + subj = task->acl;
53937 + do {
53938 + obj = lookup_acl_obj_label(ino, dev, subj);
53939 + if (obj != NULL)
53940 + return (obj->mode & GR_FIND) ? 1 : 0;
53941 + } while ((subj = subj->parent_subject));
53942 +
53943 + /* this is purely an optimization since we're looking for an object
53944 + for the directory we're doing a readdir on
53945 + if it's possible for any globbed object to match the entry we're
53946 + filling into the directory, then the object we find here will be
53947 + an anchor point with attached globbed objects
53948 + */
53949 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
53950 + if (obj->globbed == NULL)
53951 + return (obj->mode & GR_FIND) ? 1 : 0;
53952 +
53953 + is_not_root = ((obj->filename[0] == '/') &&
53954 + (obj->filename[1] == '\0')) ? 0 : 1;
53955 + bufsize = PAGE_SIZE - namelen - is_not_root;
53956 +
53957 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
53958 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
53959 + return 1;
53960 +
53961 + preempt_disable();
53962 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
53963 + bufsize);
53964 +
53965 + bufsize = strlen(path);
53966 +
53967 + /* if base is "/", don't append an additional slash */
53968 + if (is_not_root)
53969 + *(path + bufsize) = '/';
53970 + memcpy(path + bufsize + is_not_root, name, namelen);
53971 + *(path + bufsize + namelen + is_not_root) = '\0';
53972 +
53973 + tmp = obj->globbed;
53974 + while (tmp) {
53975 + if (!glob_match(tmp->filename, path)) {
53976 + preempt_enable();
53977 + return (tmp->mode & GR_FIND) ? 1 : 0;
53978 + }
53979 + tmp = tmp->next;
53980 + }
53981 + preempt_enable();
53982 + return (obj->mode & GR_FIND) ? 1 : 0;
53983 +}
53984 +
53985 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
53986 +EXPORT_SYMBOL(gr_acl_is_enabled);
53987 +#endif
53988 +EXPORT_SYMBOL(gr_learn_resource);
53989 +EXPORT_SYMBOL(gr_set_kernel_label);
53990 +#ifdef CONFIG_SECURITY
53991 +EXPORT_SYMBOL(gr_check_user_change);
53992 +EXPORT_SYMBOL(gr_check_group_change);
53993 +#endif
53994 +
53995 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
53996 new file mode 100644
53997 index 0000000..34fefda
53998 --- /dev/null
53999 +++ b/grsecurity/gracl_alloc.c
54000 @@ -0,0 +1,105 @@
54001 +#include <linux/kernel.h>
54002 +#include <linux/mm.h>
54003 +#include <linux/slab.h>
54004 +#include <linux/vmalloc.h>
54005 +#include <linux/gracl.h>
54006 +#include <linux/grsecurity.h>
54007 +
54008 +static unsigned long alloc_stack_next = 1;
54009 +static unsigned long alloc_stack_size = 1;
54010 +static void **alloc_stack;
54011 +
54012 +static __inline__ int
54013 +alloc_pop(void)
54014 +{
54015 + if (alloc_stack_next == 1)
54016 + return 0;
54017 +
54018 + kfree(alloc_stack[alloc_stack_next - 2]);
54019 +
54020 + alloc_stack_next--;
54021 +
54022 + return 1;
54023 +}
54024 +
54025 +static __inline__ int
54026 +alloc_push(void *buf)
54027 +{
54028 + if (alloc_stack_next >= alloc_stack_size)
54029 + return 1;
54030 +
54031 + alloc_stack[alloc_stack_next - 1] = buf;
54032 +
54033 + alloc_stack_next++;
54034 +
54035 + return 0;
54036 +}
54037 +
54038 +void *
54039 +acl_alloc(unsigned long len)
54040 +{
54041 + void *ret = NULL;
54042 +
54043 + if (!len || len > PAGE_SIZE)
54044 + goto out;
54045 +
54046 + ret = kmalloc(len, GFP_KERNEL);
54047 +
54048 + if (ret) {
54049 + if (alloc_push(ret)) {
54050 + kfree(ret);
54051 + ret = NULL;
54052 + }
54053 + }
54054 +
54055 +out:
54056 + return ret;
54057 +}
54058 +
54059 +void *
54060 +acl_alloc_num(unsigned long num, unsigned long len)
54061 +{
54062 + if (!len || (num > (PAGE_SIZE / len)))
54063 + return NULL;
54064 +
54065 + return acl_alloc(num * len);
54066 +}
54067 +
54068 +void
54069 +acl_free_all(void)
54070 +{
54071 + if (gr_acl_is_enabled() || !alloc_stack)
54072 + return;
54073 +
54074 + while (alloc_pop()) ;
54075 +
54076 + if (alloc_stack) {
54077 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54078 + kfree(alloc_stack);
54079 + else
54080 + vfree(alloc_stack);
54081 + }
54082 +
54083 + alloc_stack = NULL;
54084 + alloc_stack_size = 1;
54085 + alloc_stack_next = 1;
54086 +
54087 + return;
54088 +}
54089 +
54090 +int
54091 +acl_alloc_stack_init(unsigned long size)
54092 +{
54093 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54094 + alloc_stack =
54095 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54096 + else
54097 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54098 +
54099 + alloc_stack_size = size;
54100 +
54101 + if (!alloc_stack)
54102 + return 0;
54103 + else
54104 + return 1;
54105 +}
54106 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54107 new file mode 100644
54108 index 0000000..955ddfb
54109 --- /dev/null
54110 +++ b/grsecurity/gracl_cap.c
54111 @@ -0,0 +1,101 @@
54112 +#include <linux/kernel.h>
54113 +#include <linux/module.h>
54114 +#include <linux/sched.h>
54115 +#include <linux/gracl.h>
54116 +#include <linux/grsecurity.h>
54117 +#include <linux/grinternal.h>
54118 +
54119 +extern const char *captab_log[];
54120 +extern int captab_log_entries;
54121 +
54122 +int
54123 +gr_acl_is_capable(const int cap)
54124 +{
54125 + struct task_struct *task = current;
54126 + const struct cred *cred = current_cred();
54127 + struct acl_subject_label *curracl;
54128 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54129 + kernel_cap_t cap_audit = __cap_empty_set;
54130 +
54131 + if (!gr_acl_is_enabled())
54132 + return 1;
54133 +
54134 + curracl = task->acl;
54135 +
54136 + cap_drop = curracl->cap_lower;
54137 + cap_mask = curracl->cap_mask;
54138 + cap_audit = curracl->cap_invert_audit;
54139 +
54140 + while ((curracl = curracl->parent_subject)) {
54141 + /* if the cap isn't specified in the current computed mask but is specified in the
54142 + current level subject, and is lowered in the current level subject, then add
54143 + it to the set of dropped capabilities
54144 + otherwise, add the current level subject's mask to the current computed mask
54145 + */
54146 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54147 + cap_raise(cap_mask, cap);
54148 + if (cap_raised(curracl->cap_lower, cap))
54149 + cap_raise(cap_drop, cap);
54150 + if (cap_raised(curracl->cap_invert_audit, cap))
54151 + cap_raise(cap_audit, cap);
54152 + }
54153 + }
54154 +
54155 + if (!cap_raised(cap_drop, cap)) {
54156 + if (cap_raised(cap_audit, cap))
54157 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54158 + return 1;
54159 + }
54160 +
54161 + curracl = task->acl;
54162 +
54163 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54164 + && cap_raised(cred->cap_effective, cap)) {
54165 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54166 + task->role->roletype, cred->uid,
54167 + cred->gid, task->exec_file ?
54168 + gr_to_filename(task->exec_file->f_path.dentry,
54169 + task->exec_file->f_path.mnt) : curracl->filename,
54170 + curracl->filename, 0UL,
54171 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54172 + return 1;
54173 + }
54174 +
54175 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54176 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54177 + return 0;
54178 +}
54179 +
54180 +int
54181 +gr_acl_is_capable_nolog(const int cap)
54182 +{
54183 + struct acl_subject_label *curracl;
54184 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54185 +
54186 + if (!gr_acl_is_enabled())
54187 + return 1;
54188 +
54189 + curracl = current->acl;
54190 +
54191 + cap_drop = curracl->cap_lower;
54192 + cap_mask = curracl->cap_mask;
54193 +
54194 + while ((curracl = curracl->parent_subject)) {
54195 + /* if the cap isn't specified in the current computed mask but is specified in the
54196 + current level subject, and is lowered in the current level subject, then add
54197 + it to the set of dropped capabilities
54198 + otherwise, add the current level subject's mask to the current computed mask
54199 + */
54200 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54201 + cap_raise(cap_mask, cap);
54202 + if (cap_raised(curracl->cap_lower, cap))
54203 + cap_raise(cap_drop, cap);
54204 + }
54205 + }
54206 +
54207 + if (!cap_raised(cap_drop, cap))
54208 + return 1;
54209 +
54210 + return 0;
54211 +}
54212 +
54213 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54214 new file mode 100644
54215 index 0000000..88d0e87
54216 --- /dev/null
54217 +++ b/grsecurity/gracl_fs.c
54218 @@ -0,0 +1,435 @@
54219 +#include <linux/kernel.h>
54220 +#include <linux/sched.h>
54221 +#include <linux/types.h>
54222 +#include <linux/fs.h>
54223 +#include <linux/file.h>
54224 +#include <linux/stat.h>
54225 +#include <linux/grsecurity.h>
54226 +#include <linux/grinternal.h>
54227 +#include <linux/gracl.h>
54228 +
54229 +umode_t
54230 +gr_acl_umask(void)
54231 +{
54232 + if (unlikely(!gr_acl_is_enabled()))
54233 + return 0;
54234 +
54235 + return current->role->umask;
54236 +}
54237 +
54238 +__u32
54239 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54240 + const struct vfsmount * mnt)
54241 +{
54242 + __u32 mode;
54243 +
54244 + if (unlikely(!dentry->d_inode))
54245 + return GR_FIND;
54246 +
54247 + mode =
54248 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54249 +
54250 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54251 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54252 + return mode;
54253 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54254 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54255 + return 0;
54256 + } else if (unlikely(!(mode & GR_FIND)))
54257 + return 0;
54258 +
54259 + return GR_FIND;
54260 +}
54261 +
54262 +__u32
54263 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54264 + int acc_mode)
54265 +{
54266 + __u32 reqmode = GR_FIND;
54267 + __u32 mode;
54268 +
54269 + if (unlikely(!dentry->d_inode))
54270 + return reqmode;
54271 +
54272 + if (acc_mode & MAY_APPEND)
54273 + reqmode |= GR_APPEND;
54274 + else if (acc_mode & MAY_WRITE)
54275 + reqmode |= GR_WRITE;
54276 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54277 + reqmode |= GR_READ;
54278 +
54279 + mode =
54280 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54281 + mnt);
54282 +
54283 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54284 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54285 + reqmode & GR_READ ? " reading" : "",
54286 + reqmode & GR_WRITE ? " writing" : reqmode &
54287 + GR_APPEND ? " appending" : "");
54288 + return reqmode;
54289 + } else
54290 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54291 + {
54292 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54293 + reqmode & GR_READ ? " reading" : "",
54294 + reqmode & GR_WRITE ? " writing" : reqmode &
54295 + GR_APPEND ? " appending" : "");
54296 + return 0;
54297 + } else if (unlikely((mode & reqmode) != reqmode))
54298 + return 0;
54299 +
54300 + return reqmode;
54301 +}
54302 +
54303 +__u32
54304 +gr_acl_handle_creat(const struct dentry * dentry,
54305 + const struct dentry * p_dentry,
54306 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54307 + const int imode)
54308 +{
54309 + __u32 reqmode = GR_WRITE | GR_CREATE;
54310 + __u32 mode;
54311 +
54312 + if (acc_mode & MAY_APPEND)
54313 + reqmode |= GR_APPEND;
54314 + // if a directory was required or the directory already exists, then
54315 + // don't count this open as a read
54316 + if ((acc_mode & MAY_READ) &&
54317 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54318 + reqmode |= GR_READ;
54319 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54320 + reqmode |= GR_SETID;
54321 +
54322 + mode =
54323 + gr_check_create(dentry, p_dentry, p_mnt,
54324 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54325 +
54326 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54327 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54328 + reqmode & GR_READ ? " reading" : "",
54329 + reqmode & GR_WRITE ? " writing" : reqmode &
54330 + GR_APPEND ? " appending" : "");
54331 + return reqmode;
54332 + } else
54333 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54334 + {
54335 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54336 + reqmode & GR_READ ? " reading" : "",
54337 + reqmode & GR_WRITE ? " writing" : reqmode &
54338 + GR_APPEND ? " appending" : "");
54339 + return 0;
54340 + } else if (unlikely((mode & reqmode) != reqmode))
54341 + return 0;
54342 +
54343 + return reqmode;
54344 +}
54345 +
54346 +__u32
54347 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54348 + const int fmode)
54349 +{
54350 + __u32 mode, reqmode = GR_FIND;
54351 +
54352 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54353 + reqmode |= GR_EXEC;
54354 + if (fmode & S_IWOTH)
54355 + reqmode |= GR_WRITE;
54356 + if (fmode & S_IROTH)
54357 + reqmode |= GR_READ;
54358 +
54359 + mode =
54360 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54361 + mnt);
54362 +
54363 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54364 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54365 + reqmode & GR_READ ? " reading" : "",
54366 + reqmode & GR_WRITE ? " writing" : "",
54367 + reqmode & GR_EXEC ? " executing" : "");
54368 + return reqmode;
54369 + } else
54370 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54371 + {
54372 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54373 + reqmode & GR_READ ? " reading" : "",
54374 + reqmode & GR_WRITE ? " writing" : "",
54375 + reqmode & GR_EXEC ? " executing" : "");
54376 + return 0;
54377 + } else if (unlikely((mode & reqmode) != reqmode))
54378 + return 0;
54379 +
54380 + return reqmode;
54381 +}
54382 +
54383 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54384 +{
54385 + __u32 mode;
54386 +
54387 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54388 +
54389 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54390 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54391 + return mode;
54392 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54393 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54394 + return 0;
54395 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54396 + return 0;
54397 +
54398 + return (reqmode);
54399 +}
54400 +
54401 +__u32
54402 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54403 +{
54404 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54405 +}
54406 +
54407 +__u32
54408 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54409 +{
54410 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54411 +}
54412 +
54413 +__u32
54414 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54415 +{
54416 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54417 +}
54418 +
54419 +__u32
54420 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54421 +{
54422 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54423 +}
54424 +
54425 +__u32
54426 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54427 + umode_t *modeptr)
54428 +{
54429 + umode_t mode;
54430 +
54431 + *modeptr &= ~gr_acl_umask();
54432 + mode = *modeptr;
54433 +
54434 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54435 + return 1;
54436 +
54437 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
54438 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54439 + GR_CHMOD_ACL_MSG);
54440 + } else {
54441 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54442 + }
54443 +}
54444 +
54445 +__u32
54446 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54447 +{
54448 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54449 +}
54450 +
54451 +__u32
54452 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54453 +{
54454 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54455 +}
54456 +
54457 +__u32
54458 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54459 +{
54460 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54461 +}
54462 +
54463 +__u32
54464 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54465 +{
54466 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54467 + GR_UNIXCONNECT_ACL_MSG);
54468 +}
54469 +
54470 +/* hardlinks require at minimum create and link permission,
54471 + any additional privilege required is based on the
54472 + privilege of the file being linked to
54473 +*/
54474 +__u32
54475 +gr_acl_handle_link(const struct dentry * new_dentry,
54476 + const struct dentry * parent_dentry,
54477 + const struct vfsmount * parent_mnt,
54478 + const struct dentry * old_dentry,
54479 + const struct vfsmount * old_mnt, const char *to)
54480 +{
54481 + __u32 mode;
54482 + __u32 needmode = GR_CREATE | GR_LINK;
54483 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54484 +
54485 + mode =
54486 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54487 + old_mnt);
54488 +
54489 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54490 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54491 + return mode;
54492 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54493 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54494 + return 0;
54495 + } else if (unlikely((mode & needmode) != needmode))
54496 + return 0;
54497 +
54498 + return 1;
54499 +}
54500 +
54501 +__u32
54502 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54503 + const struct dentry * parent_dentry,
54504 + const struct vfsmount * parent_mnt, const char *from)
54505 +{
54506 + __u32 needmode = GR_WRITE | GR_CREATE;
54507 + __u32 mode;
54508 +
54509 + mode =
54510 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
54511 + GR_CREATE | GR_AUDIT_CREATE |
54512 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54513 +
54514 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54515 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54516 + return mode;
54517 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54518 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54519 + return 0;
54520 + } else if (unlikely((mode & needmode) != needmode))
54521 + return 0;
54522 +
54523 + return (GR_WRITE | GR_CREATE);
54524 +}
54525 +
54526 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54527 +{
54528 + __u32 mode;
54529 +
54530 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54531 +
54532 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54533 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54534 + return mode;
54535 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54536 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54537 + return 0;
54538 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54539 + return 0;
54540 +
54541 + return (reqmode);
54542 +}
54543 +
54544 +__u32
54545 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54546 + const struct dentry * parent_dentry,
54547 + const struct vfsmount * parent_mnt,
54548 + const int mode)
54549 +{
54550 + __u32 reqmode = GR_WRITE | GR_CREATE;
54551 + if (unlikely(mode & (S_ISUID | S_ISGID)))
54552 + reqmode |= GR_SETID;
54553 +
54554 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54555 + reqmode, GR_MKNOD_ACL_MSG);
54556 +}
54557 +
54558 +__u32
54559 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
54560 + const struct dentry *parent_dentry,
54561 + const struct vfsmount *parent_mnt)
54562 +{
54563 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54564 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54565 +}
54566 +
54567 +#define RENAME_CHECK_SUCCESS(old, new) \
54568 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54569 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54570 +
54571 +int
54572 +gr_acl_handle_rename(struct dentry *new_dentry,
54573 + struct dentry *parent_dentry,
54574 + const struct vfsmount *parent_mnt,
54575 + struct dentry *old_dentry,
54576 + struct inode *old_parent_inode,
54577 + struct vfsmount *old_mnt, const char *newname)
54578 +{
54579 + __u32 comp1, comp2;
54580 + int error = 0;
54581 +
54582 + if (unlikely(!gr_acl_is_enabled()))
54583 + return 0;
54584 +
54585 + if (!new_dentry->d_inode) {
54586 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54587 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54588 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54589 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54590 + GR_DELETE | GR_AUDIT_DELETE |
54591 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54592 + GR_SUPPRESS, old_mnt);
54593 + } else {
54594 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54595 + GR_CREATE | GR_DELETE |
54596 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54597 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54598 + GR_SUPPRESS, parent_mnt);
54599 + comp2 =
54600 + gr_search_file(old_dentry,
54601 + GR_READ | GR_WRITE | GR_AUDIT_READ |
54602 + GR_DELETE | GR_AUDIT_DELETE |
54603 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54604 + }
54605 +
54606 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54607 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54608 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54609 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54610 + && !(comp2 & GR_SUPPRESS)) {
54611 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54612 + error = -EACCES;
54613 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54614 + error = -EACCES;
54615 +
54616 + return error;
54617 +}
54618 +
54619 +void
54620 +gr_acl_handle_exit(void)
54621 +{
54622 + u16 id;
54623 + char *rolename;
54624 + struct file *exec_file;
54625 +
54626 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54627 + !(current->role->roletype & GR_ROLE_PERSIST))) {
54628 + id = current->acl_role_id;
54629 + rolename = current->role->rolename;
54630 + gr_set_acls(1);
54631 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54632 + }
54633 +
54634 + write_lock(&grsec_exec_file_lock);
54635 + exec_file = current->exec_file;
54636 + current->exec_file = NULL;
54637 + write_unlock(&grsec_exec_file_lock);
54638 +
54639 + if (exec_file)
54640 + fput(exec_file);
54641 +}
54642 +
54643 +int
54644 +gr_acl_handle_procpidmem(const struct task_struct *task)
54645 +{
54646 + if (unlikely(!gr_acl_is_enabled()))
54647 + return 0;
54648 +
54649 + if (task != current && task->acl->mode & GR_PROTPROCFD)
54650 + return -EACCES;
54651 +
54652 + return 0;
54653 +}
54654 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54655 new file mode 100644
54656 index 0000000..17050ca
54657 --- /dev/null
54658 +++ b/grsecurity/gracl_ip.c
54659 @@ -0,0 +1,381 @@
54660 +#include <linux/kernel.h>
54661 +#include <asm/uaccess.h>
54662 +#include <asm/errno.h>
54663 +#include <net/sock.h>
54664 +#include <linux/file.h>
54665 +#include <linux/fs.h>
54666 +#include <linux/net.h>
54667 +#include <linux/in.h>
54668 +#include <linux/skbuff.h>
54669 +#include <linux/ip.h>
54670 +#include <linux/udp.h>
54671 +#include <linux/types.h>
54672 +#include <linux/sched.h>
54673 +#include <linux/netdevice.h>
54674 +#include <linux/inetdevice.h>
54675 +#include <linux/gracl.h>
54676 +#include <linux/grsecurity.h>
54677 +#include <linux/grinternal.h>
54678 +
54679 +#define GR_BIND 0x01
54680 +#define GR_CONNECT 0x02
54681 +#define GR_INVERT 0x04
54682 +#define GR_BINDOVERRIDE 0x08
54683 +#define GR_CONNECTOVERRIDE 0x10
54684 +#define GR_SOCK_FAMILY 0x20
54685 +
54686 +static const char * gr_protocols[IPPROTO_MAX] = {
54687 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54688 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54689 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54690 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54691 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54692 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54693 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54694 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54695 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54696 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54697 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54698 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54699 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54700 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54701 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54702 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54703 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54704 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54705 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54706 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54707 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54708 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54709 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54710 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54711 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54712 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54713 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54714 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54715 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54716 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54717 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54718 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54719 + };
54720 +
54721 +static const char * gr_socktypes[SOCK_MAX] = {
54722 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54723 + "unknown:7", "unknown:8", "unknown:9", "packet"
54724 + };
54725 +
54726 +static const char * gr_sockfamilies[AF_MAX+1] = {
54727 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54728 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54729 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54730 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54731 + };
54732 +
54733 +const char *
54734 +gr_proto_to_name(unsigned char proto)
54735 +{
54736 + return gr_protocols[proto];
54737 +}
54738 +
54739 +const char *
54740 +gr_socktype_to_name(unsigned char type)
54741 +{
54742 + return gr_socktypes[type];
54743 +}
54744 +
54745 +const char *
54746 +gr_sockfamily_to_name(unsigned char family)
54747 +{
54748 + return gr_sockfamilies[family];
54749 +}
54750 +
54751 +int
54752 +gr_search_socket(const int domain, const int type, const int protocol)
54753 +{
54754 + struct acl_subject_label *curr;
54755 + const struct cred *cred = current_cred();
54756 +
54757 + if (unlikely(!gr_acl_is_enabled()))
54758 + goto exit;
54759 +
54760 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
54761 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54762 + goto exit; // let the kernel handle it
54763 +
54764 + curr = current->acl;
54765 +
54766 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54767 + /* the family is allowed, if this is PF_INET allow it only if
54768 + the extra sock type/protocol checks pass */
54769 + if (domain == PF_INET)
54770 + goto inet_check;
54771 + goto exit;
54772 + } else {
54773 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54774 + __u32 fakeip = 0;
54775 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54776 + current->role->roletype, cred->uid,
54777 + cred->gid, current->exec_file ?
54778 + gr_to_filename(current->exec_file->f_path.dentry,
54779 + current->exec_file->f_path.mnt) :
54780 + curr->filename, curr->filename,
54781 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54782 + &current->signal->saved_ip);
54783 + goto exit;
54784 + }
54785 + goto exit_fail;
54786 + }
54787 +
54788 +inet_check:
54789 + /* the rest of this checking is for IPv4 only */
54790 + if (!curr->ips)
54791 + goto exit;
54792 +
54793 + if ((curr->ip_type & (1 << type)) &&
54794 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54795 + goto exit;
54796 +
54797 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54798 + /* we don't place acls on raw sockets , and sometimes
54799 + dgram/ip sockets are opened for ioctl and not
54800 + bind/connect, so we'll fake a bind learn log */
54801 + if (type == SOCK_RAW || type == SOCK_PACKET) {
54802 + __u32 fakeip = 0;
54803 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54804 + current->role->roletype, cred->uid,
54805 + cred->gid, current->exec_file ?
54806 + gr_to_filename(current->exec_file->f_path.dentry,
54807 + current->exec_file->f_path.mnt) :
54808 + curr->filename, curr->filename,
54809 + &fakeip, 0, type,
54810 + protocol, GR_CONNECT, &current->signal->saved_ip);
54811 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54812 + __u32 fakeip = 0;
54813 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54814 + current->role->roletype, cred->uid,
54815 + cred->gid, current->exec_file ?
54816 + gr_to_filename(current->exec_file->f_path.dentry,
54817 + current->exec_file->f_path.mnt) :
54818 + curr->filename, curr->filename,
54819 + &fakeip, 0, type,
54820 + protocol, GR_BIND, &current->signal->saved_ip);
54821 + }
54822 + /* we'll log when they use connect or bind */
54823 + goto exit;
54824 + }
54825 +
54826 +exit_fail:
54827 + if (domain == PF_INET)
54828 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54829 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
54830 + else
54831 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54832 + gr_socktype_to_name(type), protocol);
54833 +
54834 + return 0;
54835 +exit:
54836 + return 1;
54837 +}
54838 +
54839 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
54840 +{
54841 + if ((ip->mode & mode) &&
54842 + (ip_port >= ip->low) &&
54843 + (ip_port <= ip->high) &&
54844 + ((ntohl(ip_addr) & our_netmask) ==
54845 + (ntohl(our_addr) & our_netmask))
54846 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
54847 + && (ip->type & (1 << type))) {
54848 + if (ip->mode & GR_INVERT)
54849 + return 2; // specifically denied
54850 + else
54851 + return 1; // allowed
54852 + }
54853 +
54854 + return 0; // not specifically allowed, may continue parsing
54855 +}
54856 +
54857 +static int
54858 +gr_search_connectbind(const int full_mode, struct sock *sk,
54859 + struct sockaddr_in *addr, const int type)
54860 +{
54861 + char iface[IFNAMSIZ] = {0};
54862 + struct acl_subject_label *curr;
54863 + struct acl_ip_label *ip;
54864 + struct inet_sock *isk;
54865 + struct net_device *dev;
54866 + struct in_device *idev;
54867 + unsigned long i;
54868 + int ret;
54869 + int mode = full_mode & (GR_BIND | GR_CONNECT);
54870 + __u32 ip_addr = 0;
54871 + __u32 our_addr;
54872 + __u32 our_netmask;
54873 + char *p;
54874 + __u16 ip_port = 0;
54875 + const struct cred *cred = current_cred();
54876 +
54877 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
54878 + return 0;
54879 +
54880 + curr = current->acl;
54881 + isk = inet_sk(sk);
54882 +
54883 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
54884 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
54885 + addr->sin_addr.s_addr = curr->inaddr_any_override;
54886 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
54887 + struct sockaddr_in saddr;
54888 + int err;
54889 +
54890 + saddr.sin_family = AF_INET;
54891 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
54892 + saddr.sin_port = isk->inet_sport;
54893 +
54894 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54895 + if (err)
54896 + return err;
54897 +
54898 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54899 + if (err)
54900 + return err;
54901 + }
54902 +
54903 + if (!curr->ips)
54904 + return 0;
54905 +
54906 + ip_addr = addr->sin_addr.s_addr;
54907 + ip_port = ntohs(addr->sin_port);
54908 +
54909 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54910 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54911 + current->role->roletype, cred->uid,
54912 + cred->gid, current->exec_file ?
54913 + gr_to_filename(current->exec_file->f_path.dentry,
54914 + current->exec_file->f_path.mnt) :
54915 + curr->filename, curr->filename,
54916 + &ip_addr, ip_port, type,
54917 + sk->sk_protocol, mode, &current->signal->saved_ip);
54918 + return 0;
54919 + }
54920 +
54921 + for (i = 0; i < curr->ip_num; i++) {
54922 + ip = *(curr->ips + i);
54923 + if (ip->iface != NULL) {
54924 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
54925 + p = strchr(iface, ':');
54926 + if (p != NULL)
54927 + *p = '\0';
54928 + dev = dev_get_by_name(sock_net(sk), iface);
54929 + if (dev == NULL)
54930 + continue;
54931 + idev = in_dev_get(dev);
54932 + if (idev == NULL) {
54933 + dev_put(dev);
54934 + continue;
54935 + }
54936 + rcu_read_lock();
54937 + for_ifa(idev) {
54938 + if (!strcmp(ip->iface, ifa->ifa_label)) {
54939 + our_addr = ifa->ifa_address;
54940 + our_netmask = 0xffffffff;
54941 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
54942 + if (ret == 1) {
54943 + rcu_read_unlock();
54944 + in_dev_put(idev);
54945 + dev_put(dev);
54946 + return 0;
54947 + } else if (ret == 2) {
54948 + rcu_read_unlock();
54949 + in_dev_put(idev);
54950 + dev_put(dev);
54951 + goto denied;
54952 + }
54953 + }
54954 + } endfor_ifa(idev);
54955 + rcu_read_unlock();
54956 + in_dev_put(idev);
54957 + dev_put(dev);
54958 + } else {
54959 + our_addr = ip->addr;
54960 + our_netmask = ip->netmask;
54961 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
54962 + if (ret == 1)
54963 + return 0;
54964 + else if (ret == 2)
54965 + goto denied;
54966 + }
54967 + }
54968 +
54969 +denied:
54970 + if (mode == GR_BIND)
54971 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
54972 + else if (mode == GR_CONNECT)
54973 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
54974 +
54975 + return -EACCES;
54976 +}
54977 +
54978 +int
54979 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
54980 +{
54981 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
54982 +}
54983 +
54984 +int
54985 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
54986 +{
54987 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
54988 +}
54989 +
54990 +int gr_search_listen(struct socket *sock)
54991 +{
54992 + struct sock *sk = sock->sk;
54993 + struct sockaddr_in addr;
54994 +
54995 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
54996 + addr.sin_port = inet_sk(sk)->inet_sport;
54997 +
54998 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
54999 +}
55000 +
55001 +int gr_search_accept(struct socket *sock)
55002 +{
55003 + struct sock *sk = sock->sk;
55004 + struct sockaddr_in addr;
55005 +
55006 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55007 + addr.sin_port = inet_sk(sk)->inet_sport;
55008 +
55009 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55010 +}
55011 +
55012 +int
55013 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55014 +{
55015 + if (addr)
55016 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55017 + else {
55018 + struct sockaddr_in sin;
55019 + const struct inet_sock *inet = inet_sk(sk);
55020 +
55021 + sin.sin_addr.s_addr = inet->inet_daddr;
55022 + sin.sin_port = inet->inet_dport;
55023 +
55024 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55025 + }
55026 +}
55027 +
55028 +int
55029 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55030 +{
55031 + struct sockaddr_in sin;
55032 +
55033 + if (unlikely(skb->len < sizeof (struct udphdr)))
55034 + return 0; // skip this packet
55035 +
55036 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55037 + sin.sin_port = udp_hdr(skb)->source;
55038 +
55039 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55040 +}
55041 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55042 new file mode 100644
55043 index 0000000..25f54ef
55044 --- /dev/null
55045 +++ b/grsecurity/gracl_learn.c
55046 @@ -0,0 +1,207 @@
55047 +#include <linux/kernel.h>
55048 +#include <linux/mm.h>
55049 +#include <linux/sched.h>
55050 +#include <linux/poll.h>
55051 +#include <linux/string.h>
55052 +#include <linux/file.h>
55053 +#include <linux/types.h>
55054 +#include <linux/vmalloc.h>
55055 +#include <linux/grinternal.h>
55056 +
55057 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55058 + size_t count, loff_t *ppos);
55059 +extern int gr_acl_is_enabled(void);
55060 +
55061 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55062 +static int gr_learn_attached;
55063 +
55064 +/* use a 512k buffer */
55065 +#define LEARN_BUFFER_SIZE (512 * 1024)
55066 +
55067 +static DEFINE_SPINLOCK(gr_learn_lock);
55068 +static DEFINE_MUTEX(gr_learn_user_mutex);
55069 +
55070 +/* we need to maintain two buffers, so that the kernel context of grlearn
55071 + uses a semaphore around the userspace copying, and the other kernel contexts
55072 + use a spinlock when copying into the buffer, since they cannot sleep
55073 +*/
55074 +static char *learn_buffer;
55075 +static char *learn_buffer_user;
55076 +static int learn_buffer_len;
55077 +static int learn_buffer_user_len;
55078 +
55079 +static ssize_t
55080 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55081 +{
55082 + DECLARE_WAITQUEUE(wait, current);
55083 + ssize_t retval = 0;
55084 +
55085 + add_wait_queue(&learn_wait, &wait);
55086 + set_current_state(TASK_INTERRUPTIBLE);
55087 + do {
55088 + mutex_lock(&gr_learn_user_mutex);
55089 + spin_lock(&gr_learn_lock);
55090 + if (learn_buffer_len)
55091 + break;
55092 + spin_unlock(&gr_learn_lock);
55093 + mutex_unlock(&gr_learn_user_mutex);
55094 + if (file->f_flags & O_NONBLOCK) {
55095 + retval = -EAGAIN;
55096 + goto out;
55097 + }
55098 + if (signal_pending(current)) {
55099 + retval = -ERESTARTSYS;
55100 + goto out;
55101 + }
55102 +
55103 + schedule();
55104 + } while (1);
55105 +
55106 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55107 + learn_buffer_user_len = learn_buffer_len;
55108 + retval = learn_buffer_len;
55109 + learn_buffer_len = 0;
55110 +
55111 + spin_unlock(&gr_learn_lock);
55112 +
55113 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55114 + retval = -EFAULT;
55115 +
55116 + mutex_unlock(&gr_learn_user_mutex);
55117 +out:
55118 + set_current_state(TASK_RUNNING);
55119 + remove_wait_queue(&learn_wait, &wait);
55120 + return retval;
55121 +}
55122 +
55123 +static unsigned int
55124 +poll_learn(struct file * file, poll_table * wait)
55125 +{
55126 + poll_wait(file, &learn_wait, wait);
55127 +
55128 + if (learn_buffer_len)
55129 + return (POLLIN | POLLRDNORM);
55130 +
55131 + return 0;
55132 +}
55133 +
55134 +void
55135 +gr_clear_learn_entries(void)
55136 +{
55137 + char *tmp;
55138 +
55139 + mutex_lock(&gr_learn_user_mutex);
55140 + spin_lock(&gr_learn_lock);
55141 + tmp = learn_buffer;
55142 + learn_buffer = NULL;
55143 + spin_unlock(&gr_learn_lock);
55144 + if (tmp)
55145 + vfree(tmp);
55146 + if (learn_buffer_user != NULL) {
55147 + vfree(learn_buffer_user);
55148 + learn_buffer_user = NULL;
55149 + }
55150 + learn_buffer_len = 0;
55151 + mutex_unlock(&gr_learn_user_mutex);
55152 +
55153 + return;
55154 +}
55155 +
55156 +void
55157 +gr_add_learn_entry(const char *fmt, ...)
55158 +{
55159 + va_list args;
55160 + unsigned int len;
55161 +
55162 + if (!gr_learn_attached)
55163 + return;
55164 +
55165 + spin_lock(&gr_learn_lock);
55166 +
55167 + /* leave a gap at the end so we know when it's "full" but don't have to
55168 + compute the exact length of the string we're trying to append
55169 + */
55170 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55171 + spin_unlock(&gr_learn_lock);
55172 + wake_up_interruptible(&learn_wait);
55173 + return;
55174 + }
55175 + if (learn_buffer == NULL) {
55176 + spin_unlock(&gr_learn_lock);
55177 + return;
55178 + }
55179 +
55180 + va_start(args, fmt);
55181 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55182 + va_end(args);
55183 +
55184 + learn_buffer_len += len + 1;
55185 +
55186 + spin_unlock(&gr_learn_lock);
55187 + wake_up_interruptible(&learn_wait);
55188 +
55189 + return;
55190 +}
55191 +
55192 +static int
55193 +open_learn(struct inode *inode, struct file *file)
55194 +{
55195 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55196 + return -EBUSY;
55197 + if (file->f_mode & FMODE_READ) {
55198 + int retval = 0;
55199 + mutex_lock(&gr_learn_user_mutex);
55200 + if (learn_buffer == NULL)
55201 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55202 + if (learn_buffer_user == NULL)
55203 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55204 + if (learn_buffer == NULL) {
55205 + retval = -ENOMEM;
55206 + goto out_error;
55207 + }
55208 + if (learn_buffer_user == NULL) {
55209 + retval = -ENOMEM;
55210 + goto out_error;
55211 + }
55212 + learn_buffer_len = 0;
55213 + learn_buffer_user_len = 0;
55214 + gr_learn_attached = 1;
55215 +out_error:
55216 + mutex_unlock(&gr_learn_user_mutex);
55217 + return retval;
55218 + }
55219 + return 0;
55220 +}
55221 +
55222 +static int
55223 +close_learn(struct inode *inode, struct file *file)
55224 +{
55225 + if (file->f_mode & FMODE_READ) {
55226 + char *tmp = NULL;
55227 + mutex_lock(&gr_learn_user_mutex);
55228 + spin_lock(&gr_learn_lock);
55229 + tmp = learn_buffer;
55230 + learn_buffer = NULL;
55231 + spin_unlock(&gr_learn_lock);
55232 + if (tmp)
55233 + vfree(tmp);
55234 + if (learn_buffer_user != NULL) {
55235 + vfree(learn_buffer_user);
55236 + learn_buffer_user = NULL;
55237 + }
55238 + learn_buffer_len = 0;
55239 + learn_buffer_user_len = 0;
55240 + gr_learn_attached = 0;
55241 + mutex_unlock(&gr_learn_user_mutex);
55242 + }
55243 +
55244 + return 0;
55245 +}
55246 +
55247 +const struct file_operations grsec_fops = {
55248 + .read = read_learn,
55249 + .write = write_grsec_handler,
55250 + .open = open_learn,
55251 + .release = close_learn,
55252 + .poll = poll_learn,
55253 +};
55254 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55255 new file mode 100644
55256 index 0000000..39645c9
55257 --- /dev/null
55258 +++ b/grsecurity/gracl_res.c
55259 @@ -0,0 +1,68 @@
55260 +#include <linux/kernel.h>
55261 +#include <linux/sched.h>
55262 +#include <linux/gracl.h>
55263 +#include <linux/grinternal.h>
55264 +
55265 +static const char *restab_log[] = {
55266 + [RLIMIT_CPU] = "RLIMIT_CPU",
55267 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55268 + [RLIMIT_DATA] = "RLIMIT_DATA",
55269 + [RLIMIT_STACK] = "RLIMIT_STACK",
55270 + [RLIMIT_CORE] = "RLIMIT_CORE",
55271 + [RLIMIT_RSS] = "RLIMIT_RSS",
55272 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55273 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55274 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55275 + [RLIMIT_AS] = "RLIMIT_AS",
55276 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55277 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55278 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55279 + [RLIMIT_NICE] = "RLIMIT_NICE",
55280 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55281 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55282 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55283 +};
55284 +
55285 +void
55286 +gr_log_resource(const struct task_struct *task,
55287 + const int res, const unsigned long wanted, const int gt)
55288 +{
55289 + const struct cred *cred;
55290 + unsigned long rlim;
55291 +
55292 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55293 + return;
55294 +
55295 + // not yet supported resource
55296 + if (unlikely(!restab_log[res]))
55297 + return;
55298 +
55299 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55300 + rlim = task_rlimit_max(task, res);
55301 + else
55302 + rlim = task_rlimit(task, res);
55303 +
55304 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55305 + return;
55306 +
55307 + rcu_read_lock();
55308 + cred = __task_cred(task);
55309 +
55310 + if (res == RLIMIT_NPROC &&
55311 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55312 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55313 + goto out_rcu_unlock;
55314 + else if (res == RLIMIT_MEMLOCK &&
55315 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55316 + goto out_rcu_unlock;
55317 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55318 + goto out_rcu_unlock;
55319 + rcu_read_unlock();
55320 +
55321 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55322 +
55323 + return;
55324 +out_rcu_unlock:
55325 + rcu_read_unlock();
55326 + return;
55327 +}
55328 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55329 new file mode 100644
55330 index 0000000..5556be3
55331 --- /dev/null
55332 +++ b/grsecurity/gracl_segv.c
55333 @@ -0,0 +1,299 @@
55334 +#include <linux/kernel.h>
55335 +#include <linux/mm.h>
55336 +#include <asm/uaccess.h>
55337 +#include <asm/errno.h>
55338 +#include <asm/mman.h>
55339 +#include <net/sock.h>
55340 +#include <linux/file.h>
55341 +#include <linux/fs.h>
55342 +#include <linux/net.h>
55343 +#include <linux/in.h>
55344 +#include <linux/slab.h>
55345 +#include <linux/types.h>
55346 +#include <linux/sched.h>
55347 +#include <linux/timer.h>
55348 +#include <linux/gracl.h>
55349 +#include <linux/grsecurity.h>
55350 +#include <linux/grinternal.h>
55351 +
55352 +static struct crash_uid *uid_set;
55353 +static unsigned short uid_used;
55354 +static DEFINE_SPINLOCK(gr_uid_lock);
55355 +extern rwlock_t gr_inode_lock;
55356 +extern struct acl_subject_label *
55357 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55358 + struct acl_role_label *role);
55359 +
55360 +#ifdef CONFIG_BTRFS_FS
55361 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55362 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55363 +#endif
55364 +
55365 +static inline dev_t __get_dev(const struct dentry *dentry)
55366 +{
55367 +#ifdef CONFIG_BTRFS_FS
55368 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55369 + return get_btrfs_dev_from_inode(dentry->d_inode);
55370 + else
55371 +#endif
55372 + return dentry->d_inode->i_sb->s_dev;
55373 +}
55374 +
55375 +int
55376 +gr_init_uidset(void)
55377 +{
55378 + uid_set =
55379 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55380 + uid_used = 0;
55381 +
55382 + return uid_set ? 1 : 0;
55383 +}
55384 +
55385 +void
55386 +gr_free_uidset(void)
55387 +{
55388 + if (uid_set)
55389 + kfree(uid_set);
55390 +
55391 + return;
55392 +}
55393 +
55394 +int
55395 +gr_find_uid(const uid_t uid)
55396 +{
55397 + struct crash_uid *tmp = uid_set;
55398 + uid_t buid;
55399 + int low = 0, high = uid_used - 1, mid;
55400 +
55401 + while (high >= low) {
55402 + mid = (low + high) >> 1;
55403 + buid = tmp[mid].uid;
55404 + if (buid == uid)
55405 + return mid;
55406 + if (buid > uid)
55407 + high = mid - 1;
55408 + if (buid < uid)
55409 + low = mid + 1;
55410 + }
55411 +
55412 + return -1;
55413 +}
55414 +
55415 +static __inline__ void
55416 +gr_insertsort(void)
55417 +{
55418 + unsigned short i, j;
55419 + struct crash_uid index;
55420 +
55421 + for (i = 1; i < uid_used; i++) {
55422 + index = uid_set[i];
55423 + j = i;
55424 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55425 + uid_set[j] = uid_set[j - 1];
55426 + j--;
55427 + }
55428 + uid_set[j] = index;
55429 + }
55430 +
55431 + return;
55432 +}
55433 +
55434 +static __inline__ void
55435 +gr_insert_uid(const uid_t uid, const unsigned long expires)
55436 +{
55437 + int loc;
55438 +
55439 + if (uid_used == GR_UIDTABLE_MAX)
55440 + return;
55441 +
55442 + loc = gr_find_uid(uid);
55443 +
55444 + if (loc >= 0) {
55445 + uid_set[loc].expires = expires;
55446 + return;
55447 + }
55448 +
55449 + uid_set[uid_used].uid = uid;
55450 + uid_set[uid_used].expires = expires;
55451 + uid_used++;
55452 +
55453 + gr_insertsort();
55454 +
55455 + return;
55456 +}
55457 +
55458 +void
55459 +gr_remove_uid(const unsigned short loc)
55460 +{
55461 + unsigned short i;
55462 +
55463 + for (i = loc + 1; i < uid_used; i++)
55464 + uid_set[i - 1] = uid_set[i];
55465 +
55466 + uid_used--;
55467 +
55468 + return;
55469 +}
55470 +
55471 +int
55472 +gr_check_crash_uid(const uid_t uid)
55473 +{
55474 + int loc;
55475 + int ret = 0;
55476 +
55477 + if (unlikely(!gr_acl_is_enabled()))
55478 + return 0;
55479 +
55480 + spin_lock(&gr_uid_lock);
55481 + loc = gr_find_uid(uid);
55482 +
55483 + if (loc < 0)
55484 + goto out_unlock;
55485 +
55486 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
55487 + gr_remove_uid(loc);
55488 + else
55489 + ret = 1;
55490 +
55491 +out_unlock:
55492 + spin_unlock(&gr_uid_lock);
55493 + return ret;
55494 +}
55495 +
55496 +static __inline__ int
55497 +proc_is_setxid(const struct cred *cred)
55498 +{
55499 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
55500 + cred->uid != cred->fsuid)
55501 + return 1;
55502 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55503 + cred->gid != cred->fsgid)
55504 + return 1;
55505 +
55506 + return 0;
55507 +}
55508 +
55509 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
55510 +
55511 +void
55512 +gr_handle_crash(struct task_struct *task, const int sig)
55513 +{
55514 + struct acl_subject_label *curr;
55515 + struct task_struct *tsk, *tsk2;
55516 + const struct cred *cred;
55517 + const struct cred *cred2;
55518 +
55519 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55520 + return;
55521 +
55522 + if (unlikely(!gr_acl_is_enabled()))
55523 + return;
55524 +
55525 + curr = task->acl;
55526 +
55527 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
55528 + return;
55529 +
55530 + if (time_before_eq(curr->expires, get_seconds())) {
55531 + curr->expires = 0;
55532 + curr->crashes = 0;
55533 + }
55534 +
55535 + curr->crashes++;
55536 +
55537 + if (!curr->expires)
55538 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55539 +
55540 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55541 + time_after(curr->expires, get_seconds())) {
55542 + rcu_read_lock();
55543 + cred = __task_cred(task);
55544 + if (cred->uid && proc_is_setxid(cred)) {
55545 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55546 + spin_lock(&gr_uid_lock);
55547 + gr_insert_uid(cred->uid, curr->expires);
55548 + spin_unlock(&gr_uid_lock);
55549 + curr->expires = 0;
55550 + curr->crashes = 0;
55551 + read_lock(&tasklist_lock);
55552 + do_each_thread(tsk2, tsk) {
55553 + cred2 = __task_cred(tsk);
55554 + if (tsk != task && cred2->uid == cred->uid)
55555 + gr_fake_force_sig(SIGKILL, tsk);
55556 + } while_each_thread(tsk2, tsk);
55557 + read_unlock(&tasklist_lock);
55558 + } else {
55559 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55560 + read_lock(&tasklist_lock);
55561 + read_lock(&grsec_exec_file_lock);
55562 + do_each_thread(tsk2, tsk) {
55563 + if (likely(tsk != task)) {
55564 + // if this thread has the same subject as the one that triggered
55565 + // RES_CRASH and it's the same binary, kill it
55566 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55567 + gr_fake_force_sig(SIGKILL, tsk);
55568 + }
55569 + } while_each_thread(tsk2, tsk);
55570 + read_unlock(&grsec_exec_file_lock);
55571 + read_unlock(&tasklist_lock);
55572 + }
55573 + rcu_read_unlock();
55574 + }
55575 +
55576 + return;
55577 +}
55578 +
55579 +int
55580 +gr_check_crash_exec(const struct file *filp)
55581 +{
55582 + struct acl_subject_label *curr;
55583 +
55584 + if (unlikely(!gr_acl_is_enabled()))
55585 + return 0;
55586 +
55587 + read_lock(&gr_inode_lock);
55588 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55589 + __get_dev(filp->f_path.dentry),
55590 + current->role);
55591 + read_unlock(&gr_inode_lock);
55592 +
55593 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55594 + (!curr->crashes && !curr->expires))
55595 + return 0;
55596 +
55597 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55598 + time_after(curr->expires, get_seconds()))
55599 + return 1;
55600 + else if (time_before_eq(curr->expires, get_seconds())) {
55601 + curr->crashes = 0;
55602 + curr->expires = 0;
55603 + }
55604 +
55605 + return 0;
55606 +}
55607 +
55608 +void
55609 +gr_handle_alertkill(struct task_struct *task)
55610 +{
55611 + struct acl_subject_label *curracl;
55612 + __u32 curr_ip;
55613 + struct task_struct *p, *p2;
55614 +
55615 + if (unlikely(!gr_acl_is_enabled()))
55616 + return;
55617 +
55618 + curracl = task->acl;
55619 + curr_ip = task->signal->curr_ip;
55620 +
55621 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55622 + read_lock(&tasklist_lock);
55623 + do_each_thread(p2, p) {
55624 + if (p->signal->curr_ip == curr_ip)
55625 + gr_fake_force_sig(SIGKILL, p);
55626 + } while_each_thread(p2, p);
55627 + read_unlock(&tasklist_lock);
55628 + } else if (curracl->mode & GR_KILLPROC)
55629 + gr_fake_force_sig(SIGKILL, task);
55630 +
55631 + return;
55632 +}
55633 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55634 new file mode 100644
55635 index 0000000..9d83a69
55636 --- /dev/null
55637 +++ b/grsecurity/gracl_shm.c
55638 @@ -0,0 +1,40 @@
55639 +#include <linux/kernel.h>
55640 +#include <linux/mm.h>
55641 +#include <linux/sched.h>
55642 +#include <linux/file.h>
55643 +#include <linux/ipc.h>
55644 +#include <linux/gracl.h>
55645 +#include <linux/grsecurity.h>
55646 +#include <linux/grinternal.h>
55647 +
55648 +int
55649 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55650 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55651 +{
55652 + struct task_struct *task;
55653 +
55654 + if (!gr_acl_is_enabled())
55655 + return 1;
55656 +
55657 + rcu_read_lock();
55658 + read_lock(&tasklist_lock);
55659 +
55660 + task = find_task_by_vpid(shm_cprid);
55661 +
55662 + if (unlikely(!task))
55663 + task = find_task_by_vpid(shm_lapid);
55664 +
55665 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55666 + (task->pid == shm_lapid)) &&
55667 + (task->acl->mode & GR_PROTSHM) &&
55668 + (task->acl != current->acl))) {
55669 + read_unlock(&tasklist_lock);
55670 + rcu_read_unlock();
55671 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55672 + return 0;
55673 + }
55674 + read_unlock(&tasklist_lock);
55675 + rcu_read_unlock();
55676 +
55677 + return 1;
55678 +}
55679 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55680 new file mode 100644
55681 index 0000000..bc0be01
55682 --- /dev/null
55683 +++ b/grsecurity/grsec_chdir.c
55684 @@ -0,0 +1,19 @@
55685 +#include <linux/kernel.h>
55686 +#include <linux/sched.h>
55687 +#include <linux/fs.h>
55688 +#include <linux/file.h>
55689 +#include <linux/grsecurity.h>
55690 +#include <linux/grinternal.h>
55691 +
55692 +void
55693 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55694 +{
55695 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55696 + if ((grsec_enable_chdir && grsec_enable_group &&
55697 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55698 + !grsec_enable_group)) {
55699 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55700 + }
55701 +#endif
55702 + return;
55703 +}
55704 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55705 new file mode 100644
55706 index 0000000..a2dc675
55707 --- /dev/null
55708 +++ b/grsecurity/grsec_chroot.c
55709 @@ -0,0 +1,351 @@
55710 +#include <linux/kernel.h>
55711 +#include <linux/module.h>
55712 +#include <linux/sched.h>
55713 +#include <linux/file.h>
55714 +#include <linux/fs.h>
55715 +#include <linux/mount.h>
55716 +#include <linux/types.h>
55717 +#include <linux/pid_namespace.h>
55718 +#include <linux/grsecurity.h>
55719 +#include <linux/grinternal.h>
55720 +
55721 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55722 +{
55723 +#ifdef CONFIG_GRKERNSEC
55724 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55725 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
55726 + task->gr_is_chrooted = 1;
55727 + else
55728 + task->gr_is_chrooted = 0;
55729 +
55730 + task->gr_chroot_dentry = path->dentry;
55731 +#endif
55732 + return;
55733 +}
55734 +
55735 +void gr_clear_chroot_entries(struct task_struct *task)
55736 +{
55737 +#ifdef CONFIG_GRKERNSEC
55738 + task->gr_is_chrooted = 0;
55739 + task->gr_chroot_dentry = NULL;
55740 +#endif
55741 + return;
55742 +}
55743 +
55744 +int
55745 +gr_handle_chroot_unix(const pid_t pid)
55746 +{
55747 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55748 + struct task_struct *p;
55749 +
55750 + if (unlikely(!grsec_enable_chroot_unix))
55751 + return 1;
55752 +
55753 + if (likely(!proc_is_chrooted(current)))
55754 + return 1;
55755 +
55756 + rcu_read_lock();
55757 + read_lock(&tasklist_lock);
55758 + p = find_task_by_vpid_unrestricted(pid);
55759 + if (unlikely(p && !have_same_root(current, p))) {
55760 + read_unlock(&tasklist_lock);
55761 + rcu_read_unlock();
55762 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55763 + return 0;
55764 + }
55765 + read_unlock(&tasklist_lock);
55766 + rcu_read_unlock();
55767 +#endif
55768 + return 1;
55769 +}
55770 +
55771 +int
55772 +gr_handle_chroot_nice(void)
55773 +{
55774 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55775 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55776 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55777 + return -EPERM;
55778 + }
55779 +#endif
55780 + return 0;
55781 +}
55782 +
55783 +int
55784 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55785 +{
55786 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55787 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55788 + && proc_is_chrooted(current)) {
55789 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55790 + return -EACCES;
55791 + }
55792 +#endif
55793 + return 0;
55794 +}
55795 +
55796 +int
55797 +gr_handle_chroot_rawio(const struct inode *inode)
55798 +{
55799 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55800 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55801 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55802 + return 1;
55803 +#endif
55804 + return 0;
55805 +}
55806 +
55807 +int
55808 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55809 +{
55810 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55811 + struct task_struct *p;
55812 + int ret = 0;
55813 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55814 + return ret;
55815 +
55816 + read_lock(&tasklist_lock);
55817 + do_each_pid_task(pid, type, p) {
55818 + if (!have_same_root(current, p)) {
55819 + ret = 1;
55820 + goto out;
55821 + }
55822 + } while_each_pid_task(pid, type, p);
55823 +out:
55824 + read_unlock(&tasklist_lock);
55825 + return ret;
55826 +#endif
55827 + return 0;
55828 +}
55829 +
55830 +int
55831 +gr_pid_is_chrooted(struct task_struct *p)
55832 +{
55833 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55834 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
55835 + return 0;
55836 +
55837 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
55838 + !have_same_root(current, p)) {
55839 + return 1;
55840 + }
55841 +#endif
55842 + return 0;
55843 +}
55844 +
55845 +EXPORT_SYMBOL(gr_pid_is_chrooted);
55846 +
55847 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
55848 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
55849 +{
55850 + struct path path, currentroot;
55851 + int ret = 0;
55852 +
55853 + path.dentry = (struct dentry *)u_dentry;
55854 + path.mnt = (struct vfsmount *)u_mnt;
55855 + get_fs_root(current->fs, &currentroot);
55856 + if (path_is_under(&path, &currentroot))
55857 + ret = 1;
55858 + path_put(&currentroot);
55859 +
55860 + return ret;
55861 +}
55862 +#endif
55863 +
55864 +int
55865 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
55866 +{
55867 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55868 + if (!grsec_enable_chroot_fchdir)
55869 + return 1;
55870 +
55871 + if (!proc_is_chrooted(current))
55872 + return 1;
55873 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
55874 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
55875 + return 0;
55876 + }
55877 +#endif
55878 + return 1;
55879 +}
55880 +
55881 +int
55882 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55883 + const time_t shm_createtime)
55884 +{
55885 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55886 + struct task_struct *p;
55887 + time_t starttime;
55888 +
55889 + if (unlikely(!grsec_enable_chroot_shmat))
55890 + return 1;
55891 +
55892 + if (likely(!proc_is_chrooted(current)))
55893 + return 1;
55894 +
55895 + rcu_read_lock();
55896 + read_lock(&tasklist_lock);
55897 +
55898 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
55899 + starttime = p->start_time.tv_sec;
55900 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
55901 + if (have_same_root(current, p)) {
55902 + goto allow;
55903 + } else {
55904 + read_unlock(&tasklist_lock);
55905 + rcu_read_unlock();
55906 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55907 + return 0;
55908 + }
55909 + }
55910 + /* creator exited, pid reuse, fall through to next check */
55911 + }
55912 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
55913 + if (unlikely(!have_same_root(current, p))) {
55914 + read_unlock(&tasklist_lock);
55915 + rcu_read_unlock();
55916 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55917 + return 0;
55918 + }
55919 + }
55920 +
55921 +allow:
55922 + read_unlock(&tasklist_lock);
55923 + rcu_read_unlock();
55924 +#endif
55925 + return 1;
55926 +}
55927 +
55928 +void
55929 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
55930 +{
55931 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55932 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
55933 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
55934 +#endif
55935 + return;
55936 +}
55937 +
55938 +int
55939 +gr_handle_chroot_mknod(const struct dentry *dentry,
55940 + const struct vfsmount *mnt, const int mode)
55941 +{
55942 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55943 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
55944 + proc_is_chrooted(current)) {
55945 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
55946 + return -EPERM;
55947 + }
55948 +#endif
55949 + return 0;
55950 +}
55951 +
55952 +int
55953 +gr_handle_chroot_mount(const struct dentry *dentry,
55954 + const struct vfsmount *mnt, const char *dev_name)
55955 +{
55956 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55957 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
55958 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
55959 + return -EPERM;
55960 + }
55961 +#endif
55962 + return 0;
55963 +}
55964 +
55965 +int
55966 +gr_handle_chroot_pivot(void)
55967 +{
55968 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55969 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
55970 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
55971 + return -EPERM;
55972 + }
55973 +#endif
55974 + return 0;
55975 +}
55976 +
55977 +int
55978 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
55979 +{
55980 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55981 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
55982 + !gr_is_outside_chroot(dentry, mnt)) {
55983 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
55984 + return -EPERM;
55985 + }
55986 +#endif
55987 + return 0;
55988 +}
55989 +
55990 +extern const char *captab_log[];
55991 +extern int captab_log_entries;
55992 +
55993 +int
55994 +gr_chroot_is_capable(const int cap)
55995 +{
55996 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55997 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
55998 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
55999 + if (cap_raised(chroot_caps, cap)) {
56000 + const struct cred *creds = current_cred();
56001 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
56002 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
56003 + }
56004 + return 0;
56005 + }
56006 + }
56007 +#endif
56008 + return 1;
56009 +}
56010 +
56011 +int
56012 +gr_chroot_is_capable_nolog(const int cap)
56013 +{
56014 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56015 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56016 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56017 + if (cap_raised(chroot_caps, cap)) {
56018 + return 0;
56019 + }
56020 + }
56021 +#endif
56022 + return 1;
56023 +}
56024 +
56025 +int
56026 +gr_handle_chroot_sysctl(const int op)
56027 +{
56028 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56029 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56030 + proc_is_chrooted(current))
56031 + return -EACCES;
56032 +#endif
56033 + return 0;
56034 +}
56035 +
56036 +void
56037 +gr_handle_chroot_chdir(struct path *path)
56038 +{
56039 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56040 + if (grsec_enable_chroot_chdir)
56041 + set_fs_pwd(current->fs, path);
56042 +#endif
56043 + return;
56044 +}
56045 +
56046 +int
56047 +gr_handle_chroot_chmod(const struct dentry *dentry,
56048 + const struct vfsmount *mnt, const int mode)
56049 +{
56050 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56051 + /* allow chmod +s on directories, but not files */
56052 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56053 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56054 + proc_is_chrooted(current)) {
56055 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56056 + return -EPERM;
56057 + }
56058 +#endif
56059 + return 0;
56060 +}
56061 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56062 new file mode 100644
56063 index 0000000..213ad8b
56064 --- /dev/null
56065 +++ b/grsecurity/grsec_disabled.c
56066 @@ -0,0 +1,437 @@
56067 +#include <linux/kernel.h>
56068 +#include <linux/module.h>
56069 +#include <linux/sched.h>
56070 +#include <linux/file.h>
56071 +#include <linux/fs.h>
56072 +#include <linux/kdev_t.h>
56073 +#include <linux/net.h>
56074 +#include <linux/in.h>
56075 +#include <linux/ip.h>
56076 +#include <linux/skbuff.h>
56077 +#include <linux/sysctl.h>
56078 +
56079 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56080 +void
56081 +pax_set_initial_flags(struct linux_binprm *bprm)
56082 +{
56083 + return;
56084 +}
56085 +#endif
56086 +
56087 +#ifdef CONFIG_SYSCTL
56088 +__u32
56089 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56090 +{
56091 + return 0;
56092 +}
56093 +#endif
56094 +
56095 +#ifdef CONFIG_TASKSTATS
56096 +int gr_is_taskstats_denied(int pid)
56097 +{
56098 + return 0;
56099 +}
56100 +#endif
56101 +
56102 +int
56103 +gr_acl_is_enabled(void)
56104 +{
56105 + return 0;
56106 +}
56107 +
56108 +void
56109 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56110 +{
56111 + return;
56112 +}
56113 +
56114 +int
56115 +gr_handle_rawio(const struct inode *inode)
56116 +{
56117 + return 0;
56118 +}
56119 +
56120 +void
56121 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56122 +{
56123 + return;
56124 +}
56125 +
56126 +int
56127 +gr_handle_ptrace(struct task_struct *task, const long request)
56128 +{
56129 + return 0;
56130 +}
56131 +
56132 +int
56133 +gr_handle_proc_ptrace(struct task_struct *task)
56134 +{
56135 + return 0;
56136 +}
56137 +
56138 +void
56139 +gr_learn_resource(const struct task_struct *task,
56140 + const int res, const unsigned long wanted, const int gt)
56141 +{
56142 + return;
56143 +}
56144 +
56145 +int
56146 +gr_set_acls(const int type)
56147 +{
56148 + return 0;
56149 +}
56150 +
56151 +int
56152 +gr_check_hidden_task(const struct task_struct *tsk)
56153 +{
56154 + return 0;
56155 +}
56156 +
56157 +int
56158 +gr_check_protected_task(const struct task_struct *task)
56159 +{
56160 + return 0;
56161 +}
56162 +
56163 +int
56164 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56165 +{
56166 + return 0;
56167 +}
56168 +
56169 +void
56170 +gr_copy_label(struct task_struct *tsk)
56171 +{
56172 + return;
56173 +}
56174 +
56175 +void
56176 +gr_set_pax_flags(struct task_struct *task)
56177 +{
56178 + return;
56179 +}
56180 +
56181 +int
56182 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56183 + const int unsafe_share)
56184 +{
56185 + return 0;
56186 +}
56187 +
56188 +void
56189 +gr_handle_delete(const ino_t ino, const dev_t dev)
56190 +{
56191 + return;
56192 +}
56193 +
56194 +void
56195 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56196 +{
56197 + return;
56198 +}
56199 +
56200 +void
56201 +gr_handle_crash(struct task_struct *task, const int sig)
56202 +{
56203 + return;
56204 +}
56205 +
56206 +int
56207 +gr_check_crash_exec(const struct file *filp)
56208 +{
56209 + return 0;
56210 +}
56211 +
56212 +int
56213 +gr_check_crash_uid(const uid_t uid)
56214 +{
56215 + return 0;
56216 +}
56217 +
56218 +void
56219 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56220 + struct dentry *old_dentry,
56221 + struct dentry *new_dentry,
56222 + struct vfsmount *mnt, const __u8 replace)
56223 +{
56224 + return;
56225 +}
56226 +
56227 +int
56228 +gr_search_socket(const int family, const int type, const int protocol)
56229 +{
56230 + return 1;
56231 +}
56232 +
56233 +int
56234 +gr_search_connectbind(const int mode, const struct socket *sock,
56235 + const struct sockaddr_in *addr)
56236 +{
56237 + return 0;
56238 +}
56239 +
56240 +void
56241 +gr_handle_alertkill(struct task_struct *task)
56242 +{
56243 + return;
56244 +}
56245 +
56246 +__u32
56247 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56248 +{
56249 + return 1;
56250 +}
56251 +
56252 +__u32
56253 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56254 + const struct vfsmount * mnt)
56255 +{
56256 + return 1;
56257 +}
56258 +
56259 +__u32
56260 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56261 + int acc_mode)
56262 +{
56263 + return 1;
56264 +}
56265 +
56266 +__u32
56267 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56268 +{
56269 + return 1;
56270 +}
56271 +
56272 +__u32
56273 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56274 +{
56275 + return 1;
56276 +}
56277 +
56278 +int
56279 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56280 + unsigned int *vm_flags)
56281 +{
56282 + return 1;
56283 +}
56284 +
56285 +__u32
56286 +gr_acl_handle_truncate(const struct dentry * dentry,
56287 + const struct vfsmount * mnt)
56288 +{
56289 + return 1;
56290 +}
56291 +
56292 +__u32
56293 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56294 +{
56295 + return 1;
56296 +}
56297 +
56298 +__u32
56299 +gr_acl_handle_access(const struct dentry * dentry,
56300 + const struct vfsmount * mnt, const int fmode)
56301 +{
56302 + return 1;
56303 +}
56304 +
56305 +__u32
56306 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56307 + umode_t *mode)
56308 +{
56309 + return 1;
56310 +}
56311 +
56312 +__u32
56313 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56314 +{
56315 + return 1;
56316 +}
56317 +
56318 +__u32
56319 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56320 +{
56321 + return 1;
56322 +}
56323 +
56324 +void
56325 +grsecurity_init(void)
56326 +{
56327 + return;
56328 +}
56329 +
56330 +umode_t gr_acl_umask(void)
56331 +{
56332 + return 0;
56333 +}
56334 +
56335 +__u32
56336 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56337 + const struct dentry * parent_dentry,
56338 + const struct vfsmount * parent_mnt,
56339 + const int mode)
56340 +{
56341 + return 1;
56342 +}
56343 +
56344 +__u32
56345 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
56346 + const struct dentry * parent_dentry,
56347 + const struct vfsmount * parent_mnt)
56348 +{
56349 + return 1;
56350 +}
56351 +
56352 +__u32
56353 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56354 + const struct dentry * parent_dentry,
56355 + const struct vfsmount * parent_mnt, const char *from)
56356 +{
56357 + return 1;
56358 +}
56359 +
56360 +__u32
56361 +gr_acl_handle_link(const struct dentry * new_dentry,
56362 + const struct dentry * parent_dentry,
56363 + const struct vfsmount * parent_mnt,
56364 + const struct dentry * old_dentry,
56365 + const struct vfsmount * old_mnt, const char *to)
56366 +{
56367 + return 1;
56368 +}
56369 +
56370 +int
56371 +gr_acl_handle_rename(const struct dentry *new_dentry,
56372 + const struct dentry *parent_dentry,
56373 + const struct vfsmount *parent_mnt,
56374 + const struct dentry *old_dentry,
56375 + const struct inode *old_parent_inode,
56376 + const struct vfsmount *old_mnt, const char *newname)
56377 +{
56378 + return 0;
56379 +}
56380 +
56381 +int
56382 +gr_acl_handle_filldir(const struct file *file, const char *name,
56383 + const int namelen, const ino_t ino)
56384 +{
56385 + return 1;
56386 +}
56387 +
56388 +int
56389 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56390 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56391 +{
56392 + return 1;
56393 +}
56394 +
56395 +int
56396 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56397 +{
56398 + return 0;
56399 +}
56400 +
56401 +int
56402 +gr_search_accept(const struct socket *sock)
56403 +{
56404 + return 0;
56405 +}
56406 +
56407 +int
56408 +gr_search_listen(const struct socket *sock)
56409 +{
56410 + return 0;
56411 +}
56412 +
56413 +int
56414 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56415 +{
56416 + return 0;
56417 +}
56418 +
56419 +__u32
56420 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56421 +{
56422 + return 1;
56423 +}
56424 +
56425 +__u32
56426 +gr_acl_handle_creat(const struct dentry * dentry,
56427 + const struct dentry * p_dentry,
56428 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56429 + const int imode)
56430 +{
56431 + return 1;
56432 +}
56433 +
56434 +void
56435 +gr_acl_handle_exit(void)
56436 +{
56437 + return;
56438 +}
56439 +
56440 +int
56441 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56442 +{
56443 + return 1;
56444 +}
56445 +
56446 +void
56447 +gr_set_role_label(const uid_t uid, const gid_t gid)
56448 +{
56449 + return;
56450 +}
56451 +
56452 +int
56453 +gr_acl_handle_procpidmem(const struct task_struct *task)
56454 +{
56455 + return 0;
56456 +}
56457 +
56458 +int
56459 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56460 +{
56461 + return 0;
56462 +}
56463 +
56464 +int
56465 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56466 +{
56467 + return 0;
56468 +}
56469 +
56470 +void
56471 +gr_set_kernel_label(struct task_struct *task)
56472 +{
56473 + return;
56474 +}
56475 +
56476 +int
56477 +gr_check_user_change(int real, int effective, int fs)
56478 +{
56479 + return 0;
56480 +}
56481 +
56482 +int
56483 +gr_check_group_change(int real, int effective, int fs)
56484 +{
56485 + return 0;
56486 +}
56487 +
56488 +int gr_acl_enable_at_secure(void)
56489 +{
56490 + return 0;
56491 +}
56492 +
56493 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56494 +{
56495 + return dentry->d_inode->i_sb->s_dev;
56496 +}
56497 +
56498 +EXPORT_SYMBOL(gr_learn_resource);
56499 +EXPORT_SYMBOL(gr_set_kernel_label);
56500 +#ifdef CONFIG_SECURITY
56501 +EXPORT_SYMBOL(gr_check_user_change);
56502 +EXPORT_SYMBOL(gr_check_group_change);
56503 +#endif
56504 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56505 new file mode 100644
56506 index 0000000..2b05ada
56507 --- /dev/null
56508 +++ b/grsecurity/grsec_exec.c
56509 @@ -0,0 +1,146 @@
56510 +#include <linux/kernel.h>
56511 +#include <linux/sched.h>
56512 +#include <linux/file.h>
56513 +#include <linux/binfmts.h>
56514 +#include <linux/fs.h>
56515 +#include <linux/types.h>
56516 +#include <linux/grdefs.h>
56517 +#include <linux/grsecurity.h>
56518 +#include <linux/grinternal.h>
56519 +#include <linux/capability.h>
56520 +#include <linux/module.h>
56521 +
56522 +#include <asm/uaccess.h>
56523 +
56524 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56525 +static char gr_exec_arg_buf[132];
56526 +static DEFINE_MUTEX(gr_exec_arg_mutex);
56527 +#endif
56528 +
56529 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56530 +
56531 +void
56532 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56533 +{
56534 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56535 + char *grarg = gr_exec_arg_buf;
56536 + unsigned int i, x, execlen = 0;
56537 + char c;
56538 +
56539 + if (!((grsec_enable_execlog && grsec_enable_group &&
56540 + in_group_p(grsec_audit_gid))
56541 + || (grsec_enable_execlog && !grsec_enable_group)))
56542 + return;
56543 +
56544 + mutex_lock(&gr_exec_arg_mutex);
56545 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
56546 +
56547 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
56548 + const char __user *p;
56549 + unsigned int len;
56550 +
56551 + p = get_user_arg_ptr(argv, i);
56552 + if (IS_ERR(p))
56553 + goto log;
56554 +
56555 + len = strnlen_user(p, 128 - execlen);
56556 + if (len > 128 - execlen)
56557 + len = 128 - execlen;
56558 + else if (len > 0)
56559 + len--;
56560 + if (copy_from_user(grarg + execlen, p, len))
56561 + goto log;
56562 +
56563 + /* rewrite unprintable characters */
56564 + for (x = 0; x < len; x++) {
56565 + c = *(grarg + execlen + x);
56566 + if (c < 32 || c > 126)
56567 + *(grarg + execlen + x) = ' ';
56568 + }
56569 +
56570 + execlen += len;
56571 + *(grarg + execlen) = ' ';
56572 + *(grarg + execlen + 1) = '\0';
56573 + execlen++;
56574 + }
56575 +
56576 + log:
56577 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56578 + bprm->file->f_path.mnt, grarg);
56579 + mutex_unlock(&gr_exec_arg_mutex);
56580 +#endif
56581 + return;
56582 +}
56583 +
56584 +#ifdef CONFIG_GRKERNSEC
56585 +extern int gr_acl_is_capable(const int cap);
56586 +extern int gr_acl_is_capable_nolog(const int cap);
56587 +extern int gr_chroot_is_capable(const int cap);
56588 +extern int gr_chroot_is_capable_nolog(const int cap);
56589 +#endif
56590 +
56591 +const char *captab_log[] = {
56592 + "CAP_CHOWN",
56593 + "CAP_DAC_OVERRIDE",
56594 + "CAP_DAC_READ_SEARCH",
56595 + "CAP_FOWNER",
56596 + "CAP_FSETID",
56597 + "CAP_KILL",
56598 + "CAP_SETGID",
56599 + "CAP_SETUID",
56600 + "CAP_SETPCAP",
56601 + "CAP_LINUX_IMMUTABLE",
56602 + "CAP_NET_BIND_SERVICE",
56603 + "CAP_NET_BROADCAST",
56604 + "CAP_NET_ADMIN",
56605 + "CAP_NET_RAW",
56606 + "CAP_IPC_LOCK",
56607 + "CAP_IPC_OWNER",
56608 + "CAP_SYS_MODULE",
56609 + "CAP_SYS_RAWIO",
56610 + "CAP_SYS_CHROOT",
56611 + "CAP_SYS_PTRACE",
56612 + "CAP_SYS_PACCT",
56613 + "CAP_SYS_ADMIN",
56614 + "CAP_SYS_BOOT",
56615 + "CAP_SYS_NICE",
56616 + "CAP_SYS_RESOURCE",
56617 + "CAP_SYS_TIME",
56618 + "CAP_SYS_TTY_CONFIG",
56619 + "CAP_MKNOD",
56620 + "CAP_LEASE",
56621 + "CAP_AUDIT_WRITE",
56622 + "CAP_AUDIT_CONTROL",
56623 + "CAP_SETFCAP",
56624 + "CAP_MAC_OVERRIDE",
56625 + "CAP_MAC_ADMIN",
56626 + "CAP_SYSLOG",
56627 + "CAP_WAKE_ALARM"
56628 +};
56629 +
56630 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56631 +
56632 +int gr_is_capable(const int cap)
56633 +{
56634 +#ifdef CONFIG_GRKERNSEC
56635 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56636 + return 1;
56637 + return 0;
56638 +#else
56639 + return 1;
56640 +#endif
56641 +}
56642 +
56643 +int gr_is_capable_nolog(const int cap)
56644 +{
56645 +#ifdef CONFIG_GRKERNSEC
56646 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56647 + return 1;
56648 + return 0;
56649 +#else
56650 + return 1;
56651 +#endif
56652 +}
56653 +
56654 +EXPORT_SYMBOL(gr_is_capable);
56655 +EXPORT_SYMBOL(gr_is_capable_nolog);
56656 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56657 new file mode 100644
56658 index 0000000..d3ee748
56659 --- /dev/null
56660 +++ b/grsecurity/grsec_fifo.c
56661 @@ -0,0 +1,24 @@
56662 +#include <linux/kernel.h>
56663 +#include <linux/sched.h>
56664 +#include <linux/fs.h>
56665 +#include <linux/file.h>
56666 +#include <linux/grinternal.h>
56667 +
56668 +int
56669 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56670 + const struct dentry *dir, const int flag, const int acc_mode)
56671 +{
56672 +#ifdef CONFIG_GRKERNSEC_FIFO
56673 + const struct cred *cred = current_cred();
56674 +
56675 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56676 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56677 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56678 + (cred->fsuid != dentry->d_inode->i_uid)) {
56679 + if (!inode_permission(dentry->d_inode, acc_mode))
56680 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56681 + return -EACCES;
56682 + }
56683 +#endif
56684 + return 0;
56685 +}
56686 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56687 new file mode 100644
56688 index 0000000..8ca18bf
56689 --- /dev/null
56690 +++ b/grsecurity/grsec_fork.c
56691 @@ -0,0 +1,23 @@
56692 +#include <linux/kernel.h>
56693 +#include <linux/sched.h>
56694 +#include <linux/grsecurity.h>
56695 +#include <linux/grinternal.h>
56696 +#include <linux/errno.h>
56697 +
56698 +void
56699 +gr_log_forkfail(const int retval)
56700 +{
56701 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56702 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56703 + switch (retval) {
56704 + case -EAGAIN:
56705 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56706 + break;
56707 + case -ENOMEM:
56708 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56709 + break;
56710 + }
56711 + }
56712 +#endif
56713 + return;
56714 +}
56715 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56716 new file mode 100644
56717 index 0000000..01ddde4
56718 --- /dev/null
56719 +++ b/grsecurity/grsec_init.c
56720 @@ -0,0 +1,277 @@
56721 +#include <linux/kernel.h>
56722 +#include <linux/sched.h>
56723 +#include <linux/mm.h>
56724 +#include <linux/gracl.h>
56725 +#include <linux/slab.h>
56726 +#include <linux/vmalloc.h>
56727 +#include <linux/percpu.h>
56728 +#include <linux/module.h>
56729 +
56730 +int grsec_enable_ptrace_readexec;
56731 +int grsec_enable_setxid;
56732 +int grsec_enable_brute;
56733 +int grsec_enable_link;
56734 +int grsec_enable_dmesg;
56735 +int grsec_enable_harden_ptrace;
56736 +int grsec_enable_fifo;
56737 +int grsec_enable_execlog;
56738 +int grsec_enable_signal;
56739 +int grsec_enable_forkfail;
56740 +int grsec_enable_audit_ptrace;
56741 +int grsec_enable_time;
56742 +int grsec_enable_audit_textrel;
56743 +int grsec_enable_group;
56744 +int grsec_audit_gid;
56745 +int grsec_enable_chdir;
56746 +int grsec_enable_mount;
56747 +int grsec_enable_rofs;
56748 +int grsec_enable_chroot_findtask;
56749 +int grsec_enable_chroot_mount;
56750 +int grsec_enable_chroot_shmat;
56751 +int grsec_enable_chroot_fchdir;
56752 +int grsec_enable_chroot_double;
56753 +int grsec_enable_chroot_pivot;
56754 +int grsec_enable_chroot_chdir;
56755 +int grsec_enable_chroot_chmod;
56756 +int grsec_enable_chroot_mknod;
56757 +int grsec_enable_chroot_nice;
56758 +int grsec_enable_chroot_execlog;
56759 +int grsec_enable_chroot_caps;
56760 +int grsec_enable_chroot_sysctl;
56761 +int grsec_enable_chroot_unix;
56762 +int grsec_enable_tpe;
56763 +int grsec_tpe_gid;
56764 +int grsec_enable_blackhole;
56765 +#ifdef CONFIG_IPV6_MODULE
56766 +EXPORT_SYMBOL(grsec_enable_blackhole);
56767 +#endif
56768 +int grsec_lastack_retries;
56769 +int grsec_enable_tpe_all;
56770 +int grsec_enable_tpe_invert;
56771 +int grsec_enable_socket_all;
56772 +int grsec_socket_all_gid;
56773 +int grsec_enable_socket_client;
56774 +int grsec_socket_client_gid;
56775 +int grsec_enable_socket_server;
56776 +int grsec_socket_server_gid;
56777 +int grsec_resource_logging;
56778 +int grsec_disable_privio;
56779 +int grsec_enable_log_rwxmaps;
56780 +int grsec_lock;
56781 +
56782 +DEFINE_SPINLOCK(grsec_alert_lock);
56783 +unsigned long grsec_alert_wtime = 0;
56784 +unsigned long grsec_alert_fyet = 0;
56785 +
56786 +DEFINE_SPINLOCK(grsec_audit_lock);
56787 +
56788 +DEFINE_RWLOCK(grsec_exec_file_lock);
56789 +
56790 +char *gr_shared_page[4];
56791 +
56792 +char *gr_alert_log_fmt;
56793 +char *gr_audit_log_fmt;
56794 +char *gr_alert_log_buf;
56795 +char *gr_audit_log_buf;
56796 +
56797 +extern struct gr_arg *gr_usermode;
56798 +extern unsigned char *gr_system_salt;
56799 +extern unsigned char *gr_system_sum;
56800 +
56801 +void __init
56802 +grsecurity_init(void)
56803 +{
56804 + int j;
56805 + /* create the per-cpu shared pages */
56806 +
56807 +#ifdef CONFIG_X86
56808 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
56809 +#endif
56810 +
56811 + for (j = 0; j < 4; j++) {
56812 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
56813 + if (gr_shared_page[j] == NULL) {
56814 + panic("Unable to allocate grsecurity shared page");
56815 + return;
56816 + }
56817 + }
56818 +
56819 + /* allocate log buffers */
56820 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
56821 + if (!gr_alert_log_fmt) {
56822 + panic("Unable to allocate grsecurity alert log format buffer");
56823 + return;
56824 + }
56825 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
56826 + if (!gr_audit_log_fmt) {
56827 + panic("Unable to allocate grsecurity audit log format buffer");
56828 + return;
56829 + }
56830 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56831 + if (!gr_alert_log_buf) {
56832 + panic("Unable to allocate grsecurity alert log buffer");
56833 + return;
56834 + }
56835 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56836 + if (!gr_audit_log_buf) {
56837 + panic("Unable to allocate grsecurity audit log buffer");
56838 + return;
56839 + }
56840 +
56841 + /* allocate memory for authentication structure */
56842 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
56843 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
56844 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
56845 +
56846 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
56847 + panic("Unable to allocate grsecurity authentication structure");
56848 + return;
56849 + }
56850 +
56851 +
56852 +#ifdef CONFIG_GRKERNSEC_IO
56853 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
56854 + grsec_disable_privio = 1;
56855 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56856 + grsec_disable_privio = 1;
56857 +#else
56858 + grsec_disable_privio = 0;
56859 +#endif
56860 +#endif
56861 +
56862 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56863 + /* for backward compatibility, tpe_invert always defaults to on if
56864 + enabled in the kernel
56865 + */
56866 + grsec_enable_tpe_invert = 1;
56867 +#endif
56868 +
56869 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56870 +#ifndef CONFIG_GRKERNSEC_SYSCTL
56871 + grsec_lock = 1;
56872 +#endif
56873 +
56874 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56875 + grsec_enable_audit_textrel = 1;
56876 +#endif
56877 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56878 + grsec_enable_log_rwxmaps = 1;
56879 +#endif
56880 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56881 + grsec_enable_group = 1;
56882 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
56883 +#endif
56884 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56885 + grsec_enable_ptrace_readexec = 1;
56886 +#endif
56887 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56888 + grsec_enable_chdir = 1;
56889 +#endif
56890 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56891 + grsec_enable_harden_ptrace = 1;
56892 +#endif
56893 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56894 + grsec_enable_mount = 1;
56895 +#endif
56896 +#ifdef CONFIG_GRKERNSEC_LINK
56897 + grsec_enable_link = 1;
56898 +#endif
56899 +#ifdef CONFIG_GRKERNSEC_BRUTE
56900 + grsec_enable_brute = 1;
56901 +#endif
56902 +#ifdef CONFIG_GRKERNSEC_DMESG
56903 + grsec_enable_dmesg = 1;
56904 +#endif
56905 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56906 + grsec_enable_blackhole = 1;
56907 + grsec_lastack_retries = 4;
56908 +#endif
56909 +#ifdef CONFIG_GRKERNSEC_FIFO
56910 + grsec_enable_fifo = 1;
56911 +#endif
56912 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56913 + grsec_enable_execlog = 1;
56914 +#endif
56915 +#ifdef CONFIG_GRKERNSEC_SETXID
56916 + grsec_enable_setxid = 1;
56917 +#endif
56918 +#ifdef CONFIG_GRKERNSEC_SIGNAL
56919 + grsec_enable_signal = 1;
56920 +#endif
56921 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56922 + grsec_enable_forkfail = 1;
56923 +#endif
56924 +#ifdef CONFIG_GRKERNSEC_TIME
56925 + grsec_enable_time = 1;
56926 +#endif
56927 +#ifdef CONFIG_GRKERNSEC_RESLOG
56928 + grsec_resource_logging = 1;
56929 +#endif
56930 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56931 + grsec_enable_chroot_findtask = 1;
56932 +#endif
56933 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56934 + grsec_enable_chroot_unix = 1;
56935 +#endif
56936 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56937 + grsec_enable_chroot_mount = 1;
56938 +#endif
56939 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56940 + grsec_enable_chroot_fchdir = 1;
56941 +#endif
56942 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56943 + grsec_enable_chroot_shmat = 1;
56944 +#endif
56945 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56946 + grsec_enable_audit_ptrace = 1;
56947 +#endif
56948 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56949 + grsec_enable_chroot_double = 1;
56950 +#endif
56951 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56952 + grsec_enable_chroot_pivot = 1;
56953 +#endif
56954 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56955 + grsec_enable_chroot_chdir = 1;
56956 +#endif
56957 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56958 + grsec_enable_chroot_chmod = 1;
56959 +#endif
56960 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56961 + grsec_enable_chroot_mknod = 1;
56962 +#endif
56963 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56964 + grsec_enable_chroot_nice = 1;
56965 +#endif
56966 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56967 + grsec_enable_chroot_execlog = 1;
56968 +#endif
56969 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56970 + grsec_enable_chroot_caps = 1;
56971 +#endif
56972 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56973 + grsec_enable_chroot_sysctl = 1;
56974 +#endif
56975 +#ifdef CONFIG_GRKERNSEC_TPE
56976 + grsec_enable_tpe = 1;
56977 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
56978 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
56979 + grsec_enable_tpe_all = 1;
56980 +#endif
56981 +#endif
56982 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56983 + grsec_enable_socket_all = 1;
56984 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
56985 +#endif
56986 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56987 + grsec_enable_socket_client = 1;
56988 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
56989 +#endif
56990 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56991 + grsec_enable_socket_server = 1;
56992 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
56993 +#endif
56994 +#endif
56995 +
56996 + return;
56997 +}
56998 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
56999 new file mode 100644
57000 index 0000000..3efe141
57001 --- /dev/null
57002 +++ b/grsecurity/grsec_link.c
57003 @@ -0,0 +1,43 @@
57004 +#include <linux/kernel.h>
57005 +#include <linux/sched.h>
57006 +#include <linux/fs.h>
57007 +#include <linux/file.h>
57008 +#include <linux/grinternal.h>
57009 +
57010 +int
57011 +gr_handle_follow_link(const struct inode *parent,
57012 + const struct inode *inode,
57013 + const struct dentry *dentry, const struct vfsmount *mnt)
57014 +{
57015 +#ifdef CONFIG_GRKERNSEC_LINK
57016 + const struct cred *cred = current_cred();
57017 +
57018 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57019 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57020 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57021 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57022 + return -EACCES;
57023 + }
57024 +#endif
57025 + return 0;
57026 +}
57027 +
57028 +int
57029 +gr_handle_hardlink(const struct dentry *dentry,
57030 + const struct vfsmount *mnt,
57031 + struct inode *inode, const int mode, const char *to)
57032 +{
57033 +#ifdef CONFIG_GRKERNSEC_LINK
57034 + const struct cred *cred = current_cred();
57035 +
57036 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57037 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57038 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57039 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57040 + !capable(CAP_FOWNER) && cred->uid) {
57041 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57042 + return -EPERM;
57043 + }
57044 +#endif
57045 + return 0;
57046 +}
57047 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57048 new file mode 100644
57049 index 0000000..a45d2e9
57050 --- /dev/null
57051 +++ b/grsecurity/grsec_log.c
57052 @@ -0,0 +1,322 @@
57053 +#include <linux/kernel.h>
57054 +#include <linux/sched.h>
57055 +#include <linux/file.h>
57056 +#include <linux/tty.h>
57057 +#include <linux/fs.h>
57058 +#include <linux/grinternal.h>
57059 +
57060 +#ifdef CONFIG_TREE_PREEMPT_RCU
57061 +#define DISABLE_PREEMPT() preempt_disable()
57062 +#define ENABLE_PREEMPT() preempt_enable()
57063 +#else
57064 +#define DISABLE_PREEMPT()
57065 +#define ENABLE_PREEMPT()
57066 +#endif
57067 +
57068 +#define BEGIN_LOCKS(x) \
57069 + DISABLE_PREEMPT(); \
57070 + rcu_read_lock(); \
57071 + read_lock(&tasklist_lock); \
57072 + read_lock(&grsec_exec_file_lock); \
57073 + if (x != GR_DO_AUDIT) \
57074 + spin_lock(&grsec_alert_lock); \
57075 + else \
57076 + spin_lock(&grsec_audit_lock)
57077 +
57078 +#define END_LOCKS(x) \
57079 + if (x != GR_DO_AUDIT) \
57080 + spin_unlock(&grsec_alert_lock); \
57081 + else \
57082 + spin_unlock(&grsec_audit_lock); \
57083 + read_unlock(&grsec_exec_file_lock); \
57084 + read_unlock(&tasklist_lock); \
57085 + rcu_read_unlock(); \
57086 + ENABLE_PREEMPT(); \
57087 + if (x == GR_DONT_AUDIT) \
57088 + gr_handle_alertkill(current)
57089 +
57090 +enum {
57091 + FLOODING,
57092 + NO_FLOODING
57093 +};
57094 +
57095 +extern char *gr_alert_log_fmt;
57096 +extern char *gr_audit_log_fmt;
57097 +extern char *gr_alert_log_buf;
57098 +extern char *gr_audit_log_buf;
57099 +
57100 +static int gr_log_start(int audit)
57101 +{
57102 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57103 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57104 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57105 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57106 + unsigned long curr_secs = get_seconds();
57107 +
57108 + if (audit == GR_DO_AUDIT)
57109 + goto set_fmt;
57110 +
57111 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57112 + grsec_alert_wtime = curr_secs;
57113 + grsec_alert_fyet = 0;
57114 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57115 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57116 + grsec_alert_fyet++;
57117 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57118 + grsec_alert_wtime = curr_secs;
57119 + grsec_alert_fyet++;
57120 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57121 + return FLOODING;
57122 + }
57123 + else return FLOODING;
57124 +
57125 +set_fmt:
57126 +#endif
57127 + memset(buf, 0, PAGE_SIZE);
57128 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57129 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57130 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57131 + } else if (current->signal->curr_ip) {
57132 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57133 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57134 + } else if (gr_acl_is_enabled()) {
57135 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57136 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57137 + } else {
57138 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57139 + strcpy(buf, fmt);
57140 + }
57141 +
57142 + return NO_FLOODING;
57143 +}
57144 +
57145 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57146 + __attribute__ ((format (printf, 2, 0)));
57147 +
57148 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57149 +{
57150 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57151 + unsigned int len = strlen(buf);
57152 +
57153 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57154 +
57155 + return;
57156 +}
57157 +
57158 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57159 + __attribute__ ((format (printf, 2, 3)));
57160 +
57161 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57162 +{
57163 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57164 + unsigned int len = strlen(buf);
57165 + va_list ap;
57166 +
57167 + va_start(ap, msg);
57168 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57169 + va_end(ap);
57170 +
57171 + return;
57172 +}
57173 +
57174 +static void gr_log_end(int audit, int append_default)
57175 +{
57176 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57177 +
57178 + if (append_default) {
57179 + unsigned int len = strlen(buf);
57180 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57181 + }
57182 +
57183 + printk("%s\n", buf);
57184 +
57185 + return;
57186 +}
57187 +
57188 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57189 +{
57190 + int logtype;
57191 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57192 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57193 + void *voidptr = NULL;
57194 + int num1 = 0, num2 = 0;
57195 + unsigned long ulong1 = 0, ulong2 = 0;
57196 + struct dentry *dentry = NULL;
57197 + struct vfsmount *mnt = NULL;
57198 + struct file *file = NULL;
57199 + struct task_struct *task = NULL;
57200 + const struct cred *cred, *pcred;
57201 + va_list ap;
57202 +
57203 + BEGIN_LOCKS(audit);
57204 + logtype = gr_log_start(audit);
57205 + if (logtype == FLOODING) {
57206 + END_LOCKS(audit);
57207 + return;
57208 + }
57209 + va_start(ap, argtypes);
57210 + switch (argtypes) {
57211 + case GR_TTYSNIFF:
57212 + task = va_arg(ap, struct task_struct *);
57213 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57214 + break;
57215 + case GR_SYSCTL_HIDDEN:
57216 + str1 = va_arg(ap, char *);
57217 + gr_log_middle_varargs(audit, msg, result, str1);
57218 + break;
57219 + case GR_RBAC:
57220 + dentry = va_arg(ap, struct dentry *);
57221 + mnt = va_arg(ap, struct vfsmount *);
57222 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57223 + break;
57224 + case GR_RBAC_STR:
57225 + dentry = va_arg(ap, struct dentry *);
57226 + mnt = va_arg(ap, struct vfsmount *);
57227 + str1 = va_arg(ap, char *);
57228 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57229 + break;
57230 + case GR_STR_RBAC:
57231 + str1 = va_arg(ap, char *);
57232 + dentry = va_arg(ap, struct dentry *);
57233 + mnt = va_arg(ap, struct vfsmount *);
57234 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57235 + break;
57236 + case GR_RBAC_MODE2:
57237 + dentry = va_arg(ap, struct dentry *);
57238 + mnt = va_arg(ap, struct vfsmount *);
57239 + str1 = va_arg(ap, char *);
57240 + str2 = va_arg(ap, char *);
57241 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57242 + break;
57243 + case GR_RBAC_MODE3:
57244 + dentry = va_arg(ap, struct dentry *);
57245 + mnt = va_arg(ap, struct vfsmount *);
57246 + str1 = va_arg(ap, char *);
57247 + str2 = va_arg(ap, char *);
57248 + str3 = va_arg(ap, char *);
57249 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57250 + break;
57251 + case GR_FILENAME:
57252 + dentry = va_arg(ap, struct dentry *);
57253 + mnt = va_arg(ap, struct vfsmount *);
57254 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57255 + break;
57256 + case GR_STR_FILENAME:
57257 + str1 = va_arg(ap, char *);
57258 + dentry = va_arg(ap, struct dentry *);
57259 + mnt = va_arg(ap, struct vfsmount *);
57260 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57261 + break;
57262 + case GR_FILENAME_STR:
57263 + dentry = va_arg(ap, struct dentry *);
57264 + mnt = va_arg(ap, struct vfsmount *);
57265 + str1 = va_arg(ap, char *);
57266 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57267 + break;
57268 + case GR_FILENAME_TWO_INT:
57269 + dentry = va_arg(ap, struct dentry *);
57270 + mnt = va_arg(ap, struct vfsmount *);
57271 + num1 = va_arg(ap, int);
57272 + num2 = va_arg(ap, int);
57273 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57274 + break;
57275 + case GR_FILENAME_TWO_INT_STR:
57276 + dentry = va_arg(ap, struct dentry *);
57277 + mnt = va_arg(ap, struct vfsmount *);
57278 + num1 = va_arg(ap, int);
57279 + num2 = va_arg(ap, int);
57280 + str1 = va_arg(ap, char *);
57281 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57282 + break;
57283 + case GR_TEXTREL:
57284 + file = va_arg(ap, struct file *);
57285 + ulong1 = va_arg(ap, unsigned long);
57286 + ulong2 = va_arg(ap, unsigned long);
57287 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57288 + break;
57289 + case GR_PTRACE:
57290 + task = va_arg(ap, struct task_struct *);
57291 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57292 + break;
57293 + case GR_RESOURCE:
57294 + task = va_arg(ap, struct task_struct *);
57295 + cred = __task_cred(task);
57296 + pcred = __task_cred(task->real_parent);
57297 + ulong1 = va_arg(ap, unsigned long);
57298 + str1 = va_arg(ap, char *);
57299 + ulong2 = va_arg(ap, unsigned long);
57300 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57301 + break;
57302 + case GR_CAP:
57303 + task = va_arg(ap, struct task_struct *);
57304 + cred = __task_cred(task);
57305 + pcred = __task_cred(task->real_parent);
57306 + str1 = va_arg(ap, char *);
57307 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57308 + break;
57309 + case GR_SIG:
57310 + str1 = va_arg(ap, char *);
57311 + voidptr = va_arg(ap, void *);
57312 + gr_log_middle_varargs(audit, msg, str1, voidptr);
57313 + break;
57314 + case GR_SIG2:
57315 + task = va_arg(ap, struct task_struct *);
57316 + cred = __task_cred(task);
57317 + pcred = __task_cred(task->real_parent);
57318 + num1 = va_arg(ap, int);
57319 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57320 + break;
57321 + case GR_CRASH1:
57322 + task = va_arg(ap, struct task_struct *);
57323 + cred = __task_cred(task);
57324 + pcred = __task_cred(task->real_parent);
57325 + ulong1 = va_arg(ap, unsigned long);
57326 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57327 + break;
57328 + case GR_CRASH2:
57329 + task = va_arg(ap, struct task_struct *);
57330 + cred = __task_cred(task);
57331 + pcred = __task_cred(task->real_parent);
57332 + ulong1 = va_arg(ap, unsigned long);
57333 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57334 + break;
57335 + case GR_RWXMAP:
57336 + file = va_arg(ap, struct file *);
57337 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57338 + break;
57339 + case GR_PSACCT:
57340 + {
57341 + unsigned int wday, cday;
57342 + __u8 whr, chr;
57343 + __u8 wmin, cmin;
57344 + __u8 wsec, csec;
57345 + char cur_tty[64] = { 0 };
57346 + char parent_tty[64] = { 0 };
57347 +
57348 + task = va_arg(ap, struct task_struct *);
57349 + wday = va_arg(ap, unsigned int);
57350 + cday = va_arg(ap, unsigned int);
57351 + whr = va_arg(ap, int);
57352 + chr = va_arg(ap, int);
57353 + wmin = va_arg(ap, int);
57354 + cmin = va_arg(ap, int);
57355 + wsec = va_arg(ap, int);
57356 + csec = va_arg(ap, int);
57357 + ulong1 = va_arg(ap, unsigned long);
57358 + cred = __task_cred(task);
57359 + pcred = __task_cred(task->real_parent);
57360 +
57361 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57362 + }
57363 + break;
57364 + default:
57365 + gr_log_middle(audit, msg, ap);
57366 + }
57367 + va_end(ap);
57368 + // these don't need DEFAULTSECARGS printed on the end
57369 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57370 + gr_log_end(audit, 0);
57371 + else
57372 + gr_log_end(audit, 1);
57373 + END_LOCKS(audit);
57374 +}
57375 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57376 new file mode 100644
57377 index 0000000..f536303
57378 --- /dev/null
57379 +++ b/grsecurity/grsec_mem.c
57380 @@ -0,0 +1,40 @@
57381 +#include <linux/kernel.h>
57382 +#include <linux/sched.h>
57383 +#include <linux/mm.h>
57384 +#include <linux/mman.h>
57385 +#include <linux/grinternal.h>
57386 +
57387 +void
57388 +gr_handle_ioperm(void)
57389 +{
57390 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57391 + return;
57392 +}
57393 +
57394 +void
57395 +gr_handle_iopl(void)
57396 +{
57397 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57398 + return;
57399 +}
57400 +
57401 +void
57402 +gr_handle_mem_readwrite(u64 from, u64 to)
57403 +{
57404 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57405 + return;
57406 +}
57407 +
57408 +void
57409 +gr_handle_vm86(void)
57410 +{
57411 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57412 + return;
57413 +}
57414 +
57415 +void
57416 +gr_log_badprocpid(const char *entry)
57417 +{
57418 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57419 + return;
57420 +}
57421 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57422 new file mode 100644
57423 index 0000000..2131422
57424 --- /dev/null
57425 +++ b/grsecurity/grsec_mount.c
57426 @@ -0,0 +1,62 @@
57427 +#include <linux/kernel.h>
57428 +#include <linux/sched.h>
57429 +#include <linux/mount.h>
57430 +#include <linux/grsecurity.h>
57431 +#include <linux/grinternal.h>
57432 +
57433 +void
57434 +gr_log_remount(const char *devname, const int retval)
57435 +{
57436 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57437 + if (grsec_enable_mount && (retval >= 0))
57438 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57439 +#endif
57440 + return;
57441 +}
57442 +
57443 +void
57444 +gr_log_unmount(const char *devname, const int retval)
57445 +{
57446 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57447 + if (grsec_enable_mount && (retval >= 0))
57448 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57449 +#endif
57450 + return;
57451 +}
57452 +
57453 +void
57454 +gr_log_mount(const char *from, const char *to, const int retval)
57455 +{
57456 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57457 + if (grsec_enable_mount && (retval >= 0))
57458 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57459 +#endif
57460 + return;
57461 +}
57462 +
57463 +int
57464 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57465 +{
57466 +#ifdef CONFIG_GRKERNSEC_ROFS
57467 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57468 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57469 + return -EPERM;
57470 + } else
57471 + return 0;
57472 +#endif
57473 + return 0;
57474 +}
57475 +
57476 +int
57477 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57478 +{
57479 +#ifdef CONFIG_GRKERNSEC_ROFS
57480 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57481 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57482 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57483 + return -EPERM;
57484 + } else
57485 + return 0;
57486 +#endif
57487 + return 0;
57488 +}
57489 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57490 new file mode 100644
57491 index 0000000..a3b12a0
57492 --- /dev/null
57493 +++ b/grsecurity/grsec_pax.c
57494 @@ -0,0 +1,36 @@
57495 +#include <linux/kernel.h>
57496 +#include <linux/sched.h>
57497 +#include <linux/mm.h>
57498 +#include <linux/file.h>
57499 +#include <linux/grinternal.h>
57500 +#include <linux/grsecurity.h>
57501 +
57502 +void
57503 +gr_log_textrel(struct vm_area_struct * vma)
57504 +{
57505 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57506 + if (grsec_enable_audit_textrel)
57507 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57508 +#endif
57509 + return;
57510 +}
57511 +
57512 +void
57513 +gr_log_rwxmmap(struct file *file)
57514 +{
57515 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57516 + if (grsec_enable_log_rwxmaps)
57517 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57518 +#endif
57519 + return;
57520 +}
57521 +
57522 +void
57523 +gr_log_rwxmprotect(struct file *file)
57524 +{
57525 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57526 + if (grsec_enable_log_rwxmaps)
57527 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57528 +#endif
57529 + return;
57530 +}
57531 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57532 new file mode 100644
57533 index 0000000..f7f29aa
57534 --- /dev/null
57535 +++ b/grsecurity/grsec_ptrace.c
57536 @@ -0,0 +1,30 @@
57537 +#include <linux/kernel.h>
57538 +#include <linux/sched.h>
57539 +#include <linux/grinternal.h>
57540 +#include <linux/security.h>
57541 +
57542 +void
57543 +gr_audit_ptrace(struct task_struct *task)
57544 +{
57545 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57546 + if (grsec_enable_audit_ptrace)
57547 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57548 +#endif
57549 + return;
57550 +}
57551 +
57552 +int
57553 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
57554 +{
57555 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57556 + const struct dentry *dentry = file->f_path.dentry;
57557 + const struct vfsmount *mnt = file->f_path.mnt;
57558 +
57559 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57560 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57561 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57562 + return -EACCES;
57563 + }
57564 +#endif
57565 + return 0;
57566 +}
57567 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57568 new file mode 100644
57569 index 0000000..7a5b2de
57570 --- /dev/null
57571 +++ b/grsecurity/grsec_sig.c
57572 @@ -0,0 +1,207 @@
57573 +#include <linux/kernel.h>
57574 +#include <linux/sched.h>
57575 +#include <linux/delay.h>
57576 +#include <linux/grsecurity.h>
57577 +#include <linux/grinternal.h>
57578 +#include <linux/hardirq.h>
57579 +
57580 +char *signames[] = {
57581 + [SIGSEGV] = "Segmentation fault",
57582 + [SIGILL] = "Illegal instruction",
57583 + [SIGABRT] = "Abort",
57584 + [SIGBUS] = "Invalid alignment/Bus error"
57585 +};
57586 +
57587 +void
57588 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57589 +{
57590 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57591 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57592 + (sig == SIGABRT) || (sig == SIGBUS))) {
57593 + if (t->pid == current->pid) {
57594 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57595 + } else {
57596 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57597 + }
57598 + }
57599 +#endif
57600 + return;
57601 +}
57602 +
57603 +int
57604 +gr_handle_signal(const struct task_struct *p, const int sig)
57605 +{
57606 +#ifdef CONFIG_GRKERNSEC
57607 + /* ignore the 0 signal for protected task checks */
57608 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57609 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57610 + return -EPERM;
57611 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57612 + return -EPERM;
57613 + }
57614 +#endif
57615 + return 0;
57616 +}
57617 +
57618 +#ifdef CONFIG_GRKERNSEC
57619 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57620 +
57621 +int gr_fake_force_sig(int sig, struct task_struct *t)
57622 +{
57623 + unsigned long int flags;
57624 + int ret, blocked, ignored;
57625 + struct k_sigaction *action;
57626 +
57627 + spin_lock_irqsave(&t->sighand->siglock, flags);
57628 + action = &t->sighand->action[sig-1];
57629 + ignored = action->sa.sa_handler == SIG_IGN;
57630 + blocked = sigismember(&t->blocked, sig);
57631 + if (blocked || ignored) {
57632 + action->sa.sa_handler = SIG_DFL;
57633 + if (blocked) {
57634 + sigdelset(&t->blocked, sig);
57635 + recalc_sigpending_and_wake(t);
57636 + }
57637 + }
57638 + if (action->sa.sa_handler == SIG_DFL)
57639 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
57640 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57641 +
57642 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
57643 +
57644 + return ret;
57645 +}
57646 +#endif
57647 +
57648 +#ifdef CONFIG_GRKERNSEC_BRUTE
57649 +#define GR_USER_BAN_TIME (15 * 60)
57650 +
57651 +static int __get_dumpable(unsigned long mm_flags)
57652 +{
57653 + int ret;
57654 +
57655 + ret = mm_flags & MMF_DUMPABLE_MASK;
57656 + return (ret >= 2) ? 2 : ret;
57657 +}
57658 +#endif
57659 +
57660 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57661 +{
57662 +#ifdef CONFIG_GRKERNSEC_BRUTE
57663 + uid_t uid = 0;
57664 +
57665 + if (!grsec_enable_brute)
57666 + return;
57667 +
57668 + rcu_read_lock();
57669 + read_lock(&tasklist_lock);
57670 + read_lock(&grsec_exec_file_lock);
57671 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57672 + p->real_parent->brute = 1;
57673 + else {
57674 + const struct cred *cred = __task_cred(p), *cred2;
57675 + struct task_struct *tsk, *tsk2;
57676 +
57677 + if (!__get_dumpable(mm_flags) && cred->uid) {
57678 + struct user_struct *user;
57679 +
57680 + uid = cred->uid;
57681 +
57682 + /* this is put upon execution past expiration */
57683 + user = find_user(uid);
57684 + if (user == NULL)
57685 + goto unlock;
57686 + user->banned = 1;
57687 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57688 + if (user->ban_expires == ~0UL)
57689 + user->ban_expires--;
57690 +
57691 + do_each_thread(tsk2, tsk) {
57692 + cred2 = __task_cred(tsk);
57693 + if (tsk != p && cred2->uid == uid)
57694 + gr_fake_force_sig(SIGKILL, tsk);
57695 + } while_each_thread(tsk2, tsk);
57696 + }
57697 + }
57698 +unlock:
57699 + read_unlock(&grsec_exec_file_lock);
57700 + read_unlock(&tasklist_lock);
57701 + rcu_read_unlock();
57702 +
57703 + if (uid)
57704 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57705 +
57706 +#endif
57707 + return;
57708 +}
57709 +
57710 +void gr_handle_brute_check(void)
57711 +{
57712 +#ifdef CONFIG_GRKERNSEC_BRUTE
57713 + if (current->brute)
57714 + msleep(30 * 1000);
57715 +#endif
57716 + return;
57717 +}
57718 +
57719 +void gr_handle_kernel_exploit(void)
57720 +{
57721 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57722 + const struct cred *cred;
57723 + struct task_struct *tsk, *tsk2;
57724 + struct user_struct *user;
57725 + uid_t uid;
57726 +
57727 + if (in_irq() || in_serving_softirq() || in_nmi())
57728 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57729 +
57730 + uid = current_uid();
57731 +
57732 + if (uid == 0)
57733 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
57734 + else {
57735 + /* kill all the processes of this user, hold a reference
57736 + to their creds struct, and prevent them from creating
57737 + another process until system reset
57738 + */
57739 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57740 + /* we intentionally leak this ref */
57741 + user = get_uid(current->cred->user);
57742 + if (user) {
57743 + user->banned = 1;
57744 + user->ban_expires = ~0UL;
57745 + }
57746 +
57747 + read_lock(&tasklist_lock);
57748 + do_each_thread(tsk2, tsk) {
57749 + cred = __task_cred(tsk);
57750 + if (cred->uid == uid)
57751 + gr_fake_force_sig(SIGKILL, tsk);
57752 + } while_each_thread(tsk2, tsk);
57753 + read_unlock(&tasklist_lock);
57754 + }
57755 +#endif
57756 +}
57757 +
57758 +int __gr_process_user_ban(struct user_struct *user)
57759 +{
57760 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57761 + if (unlikely(user->banned)) {
57762 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57763 + user->banned = 0;
57764 + user->ban_expires = 0;
57765 + free_uid(user);
57766 + } else
57767 + return -EPERM;
57768 + }
57769 +#endif
57770 + return 0;
57771 +}
57772 +
57773 +int gr_process_user_ban(void)
57774 +{
57775 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57776 + return __gr_process_user_ban(current->cred->user);
57777 +#endif
57778 + return 0;
57779 +}
57780 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57781 new file mode 100644
57782 index 0000000..4030d57
57783 --- /dev/null
57784 +++ b/grsecurity/grsec_sock.c
57785 @@ -0,0 +1,244 @@
57786 +#include <linux/kernel.h>
57787 +#include <linux/module.h>
57788 +#include <linux/sched.h>
57789 +#include <linux/file.h>
57790 +#include <linux/net.h>
57791 +#include <linux/in.h>
57792 +#include <linux/ip.h>
57793 +#include <net/sock.h>
57794 +#include <net/inet_sock.h>
57795 +#include <linux/grsecurity.h>
57796 +#include <linux/grinternal.h>
57797 +#include <linux/gracl.h>
57798 +
57799 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
57800 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
57801 +
57802 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
57803 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
57804 +
57805 +#ifdef CONFIG_UNIX_MODULE
57806 +EXPORT_SYMBOL(gr_acl_handle_unix);
57807 +EXPORT_SYMBOL(gr_acl_handle_mknod);
57808 +EXPORT_SYMBOL(gr_handle_chroot_unix);
57809 +EXPORT_SYMBOL(gr_handle_create);
57810 +#endif
57811 +
57812 +#ifdef CONFIG_GRKERNSEC
57813 +#define gr_conn_table_size 32749
57814 +struct conn_table_entry {
57815 + struct conn_table_entry *next;
57816 + struct signal_struct *sig;
57817 +};
57818 +
57819 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
57820 +DEFINE_SPINLOCK(gr_conn_table_lock);
57821 +
57822 +extern const char * gr_socktype_to_name(unsigned char type);
57823 +extern const char * gr_proto_to_name(unsigned char proto);
57824 +extern const char * gr_sockfamily_to_name(unsigned char family);
57825 +
57826 +static __inline__ int
57827 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
57828 +{
57829 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
57830 +}
57831 +
57832 +static __inline__ int
57833 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
57834 + __u16 sport, __u16 dport)
57835 +{
57836 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
57837 + sig->gr_sport == sport && sig->gr_dport == dport))
57838 + return 1;
57839 + else
57840 + return 0;
57841 +}
57842 +
57843 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
57844 +{
57845 + struct conn_table_entry **match;
57846 + unsigned int index;
57847 +
57848 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57849 + sig->gr_sport, sig->gr_dport,
57850 + gr_conn_table_size);
57851 +
57852 + newent->sig = sig;
57853 +
57854 + match = &gr_conn_table[index];
57855 + newent->next = *match;
57856 + *match = newent;
57857 +
57858 + return;
57859 +}
57860 +
57861 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
57862 +{
57863 + struct conn_table_entry *match, *last = NULL;
57864 + unsigned int index;
57865 +
57866 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57867 + sig->gr_sport, sig->gr_dport,
57868 + gr_conn_table_size);
57869 +
57870 + match = gr_conn_table[index];
57871 + while (match && !conn_match(match->sig,
57872 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
57873 + sig->gr_dport)) {
57874 + last = match;
57875 + match = match->next;
57876 + }
57877 +
57878 + if (match) {
57879 + if (last)
57880 + last->next = match->next;
57881 + else
57882 + gr_conn_table[index] = NULL;
57883 + kfree(match);
57884 + }
57885 +
57886 + return;
57887 +}
57888 +
57889 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
57890 + __u16 sport, __u16 dport)
57891 +{
57892 + struct conn_table_entry *match;
57893 + unsigned int index;
57894 +
57895 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
57896 +
57897 + match = gr_conn_table[index];
57898 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
57899 + match = match->next;
57900 +
57901 + if (match)
57902 + return match->sig;
57903 + else
57904 + return NULL;
57905 +}
57906 +
57907 +#endif
57908 +
57909 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
57910 +{
57911 +#ifdef CONFIG_GRKERNSEC
57912 + struct signal_struct *sig = task->signal;
57913 + struct conn_table_entry *newent;
57914 +
57915 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
57916 + if (newent == NULL)
57917 + return;
57918 + /* no bh lock needed since we are called with bh disabled */
57919 + spin_lock(&gr_conn_table_lock);
57920 + gr_del_task_from_ip_table_nolock(sig);
57921 + sig->gr_saddr = inet->inet_rcv_saddr;
57922 + sig->gr_daddr = inet->inet_daddr;
57923 + sig->gr_sport = inet->inet_sport;
57924 + sig->gr_dport = inet->inet_dport;
57925 + gr_add_to_task_ip_table_nolock(sig, newent);
57926 + spin_unlock(&gr_conn_table_lock);
57927 +#endif
57928 + return;
57929 +}
57930 +
57931 +void gr_del_task_from_ip_table(struct task_struct *task)
57932 +{
57933 +#ifdef CONFIG_GRKERNSEC
57934 + spin_lock_bh(&gr_conn_table_lock);
57935 + gr_del_task_from_ip_table_nolock(task->signal);
57936 + spin_unlock_bh(&gr_conn_table_lock);
57937 +#endif
57938 + return;
57939 +}
57940 +
57941 +void
57942 +gr_attach_curr_ip(const struct sock *sk)
57943 +{
57944 +#ifdef CONFIG_GRKERNSEC
57945 + struct signal_struct *p, *set;
57946 + const struct inet_sock *inet = inet_sk(sk);
57947 +
57948 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
57949 + return;
57950 +
57951 + set = current->signal;
57952 +
57953 + spin_lock_bh(&gr_conn_table_lock);
57954 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
57955 + inet->inet_dport, inet->inet_sport);
57956 + if (unlikely(p != NULL)) {
57957 + set->curr_ip = p->curr_ip;
57958 + set->used_accept = 1;
57959 + gr_del_task_from_ip_table_nolock(p);
57960 + spin_unlock_bh(&gr_conn_table_lock);
57961 + return;
57962 + }
57963 + spin_unlock_bh(&gr_conn_table_lock);
57964 +
57965 + set->curr_ip = inet->inet_daddr;
57966 + set->used_accept = 1;
57967 +#endif
57968 + return;
57969 +}
57970 +
57971 +int
57972 +gr_handle_sock_all(const int family, const int type, const int protocol)
57973 +{
57974 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57975 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
57976 + (family != AF_UNIX)) {
57977 + if (family == AF_INET)
57978 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
57979 + else
57980 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
57981 + return -EACCES;
57982 + }
57983 +#endif
57984 + return 0;
57985 +}
57986 +
57987 +int
57988 +gr_handle_sock_server(const struct sockaddr *sck)
57989 +{
57990 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57991 + if (grsec_enable_socket_server &&
57992 + in_group_p(grsec_socket_server_gid) &&
57993 + sck && (sck->sa_family != AF_UNIX) &&
57994 + (sck->sa_family != AF_LOCAL)) {
57995 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
57996 + return -EACCES;
57997 + }
57998 +#endif
57999 + return 0;
58000 +}
58001 +
58002 +int
58003 +gr_handle_sock_server_other(const struct sock *sck)
58004 +{
58005 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58006 + if (grsec_enable_socket_server &&
58007 + in_group_p(grsec_socket_server_gid) &&
58008 + sck && (sck->sk_family != AF_UNIX) &&
58009 + (sck->sk_family != AF_LOCAL)) {
58010 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58011 + return -EACCES;
58012 + }
58013 +#endif
58014 + return 0;
58015 +}
58016 +
58017 +int
58018 +gr_handle_sock_client(const struct sockaddr *sck)
58019 +{
58020 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58021 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58022 + sck && (sck->sa_family != AF_UNIX) &&
58023 + (sck->sa_family != AF_LOCAL)) {
58024 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58025 + return -EACCES;
58026 + }
58027 +#endif
58028 + return 0;
58029 +}
58030 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58031 new file mode 100644
58032 index 0000000..a1aedd7
58033 --- /dev/null
58034 +++ b/grsecurity/grsec_sysctl.c
58035 @@ -0,0 +1,451 @@
58036 +#include <linux/kernel.h>
58037 +#include <linux/sched.h>
58038 +#include <linux/sysctl.h>
58039 +#include <linux/grsecurity.h>
58040 +#include <linux/grinternal.h>
58041 +
58042 +int
58043 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58044 +{
58045 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58046 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58047 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58048 + return -EACCES;
58049 + }
58050 +#endif
58051 + return 0;
58052 +}
58053 +
58054 +#ifdef CONFIG_GRKERNSEC_ROFS
58055 +static int __maybe_unused one = 1;
58056 +#endif
58057 +
58058 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58059 +struct ctl_table grsecurity_table[] = {
58060 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58061 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58062 +#ifdef CONFIG_GRKERNSEC_IO
58063 + {
58064 + .procname = "disable_priv_io",
58065 + .data = &grsec_disable_privio,
58066 + .maxlen = sizeof(int),
58067 + .mode = 0600,
58068 + .proc_handler = &proc_dointvec,
58069 + },
58070 +#endif
58071 +#endif
58072 +#ifdef CONFIG_GRKERNSEC_LINK
58073 + {
58074 + .procname = "linking_restrictions",
58075 + .data = &grsec_enable_link,
58076 + .maxlen = sizeof(int),
58077 + .mode = 0600,
58078 + .proc_handler = &proc_dointvec,
58079 + },
58080 +#endif
58081 +#ifdef CONFIG_GRKERNSEC_BRUTE
58082 + {
58083 + .procname = "deter_bruteforce",
58084 + .data = &grsec_enable_brute,
58085 + .maxlen = sizeof(int),
58086 + .mode = 0600,
58087 + .proc_handler = &proc_dointvec,
58088 + },
58089 +#endif
58090 +#ifdef CONFIG_GRKERNSEC_FIFO
58091 + {
58092 + .procname = "fifo_restrictions",
58093 + .data = &grsec_enable_fifo,
58094 + .maxlen = sizeof(int),
58095 + .mode = 0600,
58096 + .proc_handler = &proc_dointvec,
58097 + },
58098 +#endif
58099 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58100 + {
58101 + .procname = "ptrace_readexec",
58102 + .data = &grsec_enable_ptrace_readexec,
58103 + .maxlen = sizeof(int),
58104 + .mode = 0600,
58105 + .proc_handler = &proc_dointvec,
58106 + },
58107 +#endif
58108 +#ifdef CONFIG_GRKERNSEC_SETXID
58109 + {
58110 + .procname = "consistent_setxid",
58111 + .data = &grsec_enable_setxid,
58112 + .maxlen = sizeof(int),
58113 + .mode = 0600,
58114 + .proc_handler = &proc_dointvec,
58115 + },
58116 +#endif
58117 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58118 + {
58119 + .procname = "ip_blackhole",
58120 + .data = &grsec_enable_blackhole,
58121 + .maxlen = sizeof(int),
58122 + .mode = 0600,
58123 + .proc_handler = &proc_dointvec,
58124 + },
58125 + {
58126 + .procname = "lastack_retries",
58127 + .data = &grsec_lastack_retries,
58128 + .maxlen = sizeof(int),
58129 + .mode = 0600,
58130 + .proc_handler = &proc_dointvec,
58131 + },
58132 +#endif
58133 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58134 + {
58135 + .procname = "exec_logging",
58136 + .data = &grsec_enable_execlog,
58137 + .maxlen = sizeof(int),
58138 + .mode = 0600,
58139 + .proc_handler = &proc_dointvec,
58140 + },
58141 +#endif
58142 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58143 + {
58144 + .procname = "rwxmap_logging",
58145 + .data = &grsec_enable_log_rwxmaps,
58146 + .maxlen = sizeof(int),
58147 + .mode = 0600,
58148 + .proc_handler = &proc_dointvec,
58149 + },
58150 +#endif
58151 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58152 + {
58153 + .procname = "signal_logging",
58154 + .data = &grsec_enable_signal,
58155 + .maxlen = sizeof(int),
58156 + .mode = 0600,
58157 + .proc_handler = &proc_dointvec,
58158 + },
58159 +#endif
58160 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58161 + {
58162 + .procname = "forkfail_logging",
58163 + .data = &grsec_enable_forkfail,
58164 + .maxlen = sizeof(int),
58165 + .mode = 0600,
58166 + .proc_handler = &proc_dointvec,
58167 + },
58168 +#endif
58169 +#ifdef CONFIG_GRKERNSEC_TIME
58170 + {
58171 + .procname = "timechange_logging",
58172 + .data = &grsec_enable_time,
58173 + .maxlen = sizeof(int),
58174 + .mode = 0600,
58175 + .proc_handler = &proc_dointvec,
58176 + },
58177 +#endif
58178 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58179 + {
58180 + .procname = "chroot_deny_shmat",
58181 + .data = &grsec_enable_chroot_shmat,
58182 + .maxlen = sizeof(int),
58183 + .mode = 0600,
58184 + .proc_handler = &proc_dointvec,
58185 + },
58186 +#endif
58187 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58188 + {
58189 + .procname = "chroot_deny_unix",
58190 + .data = &grsec_enable_chroot_unix,
58191 + .maxlen = sizeof(int),
58192 + .mode = 0600,
58193 + .proc_handler = &proc_dointvec,
58194 + },
58195 +#endif
58196 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58197 + {
58198 + .procname = "chroot_deny_mount",
58199 + .data = &grsec_enable_chroot_mount,
58200 + .maxlen = sizeof(int),
58201 + .mode = 0600,
58202 + .proc_handler = &proc_dointvec,
58203 + },
58204 +#endif
58205 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58206 + {
58207 + .procname = "chroot_deny_fchdir",
58208 + .data = &grsec_enable_chroot_fchdir,
58209 + .maxlen = sizeof(int),
58210 + .mode = 0600,
58211 + .proc_handler = &proc_dointvec,
58212 + },
58213 +#endif
58214 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58215 + {
58216 + .procname = "chroot_deny_chroot",
58217 + .data = &grsec_enable_chroot_double,
58218 + .maxlen = sizeof(int),
58219 + .mode = 0600,
58220 + .proc_handler = &proc_dointvec,
58221 + },
58222 +#endif
58223 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58224 + {
58225 + .procname = "chroot_deny_pivot",
58226 + .data = &grsec_enable_chroot_pivot,
58227 + .maxlen = sizeof(int),
58228 + .mode = 0600,
58229 + .proc_handler = &proc_dointvec,
58230 + },
58231 +#endif
58232 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58233 + {
58234 + .procname = "chroot_enforce_chdir",
58235 + .data = &grsec_enable_chroot_chdir,
58236 + .maxlen = sizeof(int),
58237 + .mode = 0600,
58238 + .proc_handler = &proc_dointvec,
58239 + },
58240 +#endif
58241 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58242 + {
58243 + .procname = "chroot_deny_chmod",
58244 + .data = &grsec_enable_chroot_chmod,
58245 + .maxlen = sizeof(int),
58246 + .mode = 0600,
58247 + .proc_handler = &proc_dointvec,
58248 + },
58249 +#endif
58250 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58251 + {
58252 + .procname = "chroot_deny_mknod",
58253 + .data = &grsec_enable_chroot_mknod,
58254 + .maxlen = sizeof(int),
58255 + .mode = 0600,
58256 + .proc_handler = &proc_dointvec,
58257 + },
58258 +#endif
58259 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58260 + {
58261 + .procname = "chroot_restrict_nice",
58262 + .data = &grsec_enable_chroot_nice,
58263 + .maxlen = sizeof(int),
58264 + .mode = 0600,
58265 + .proc_handler = &proc_dointvec,
58266 + },
58267 +#endif
58268 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58269 + {
58270 + .procname = "chroot_execlog",
58271 + .data = &grsec_enable_chroot_execlog,
58272 + .maxlen = sizeof(int),
58273 + .mode = 0600,
58274 + .proc_handler = &proc_dointvec,
58275 + },
58276 +#endif
58277 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58278 + {
58279 + .procname = "chroot_caps",
58280 + .data = &grsec_enable_chroot_caps,
58281 + .maxlen = sizeof(int),
58282 + .mode = 0600,
58283 + .proc_handler = &proc_dointvec,
58284 + },
58285 +#endif
58286 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58287 + {
58288 + .procname = "chroot_deny_sysctl",
58289 + .data = &grsec_enable_chroot_sysctl,
58290 + .maxlen = sizeof(int),
58291 + .mode = 0600,
58292 + .proc_handler = &proc_dointvec,
58293 + },
58294 +#endif
58295 +#ifdef CONFIG_GRKERNSEC_TPE
58296 + {
58297 + .procname = "tpe",
58298 + .data = &grsec_enable_tpe,
58299 + .maxlen = sizeof(int),
58300 + .mode = 0600,
58301 + .proc_handler = &proc_dointvec,
58302 + },
58303 + {
58304 + .procname = "tpe_gid",
58305 + .data = &grsec_tpe_gid,
58306 + .maxlen = sizeof(int),
58307 + .mode = 0600,
58308 + .proc_handler = &proc_dointvec,
58309 + },
58310 +#endif
58311 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58312 + {
58313 + .procname = "tpe_invert",
58314 + .data = &grsec_enable_tpe_invert,
58315 + .maxlen = sizeof(int),
58316 + .mode = 0600,
58317 + .proc_handler = &proc_dointvec,
58318 + },
58319 +#endif
58320 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58321 + {
58322 + .procname = "tpe_restrict_all",
58323 + .data = &grsec_enable_tpe_all,
58324 + .maxlen = sizeof(int),
58325 + .mode = 0600,
58326 + .proc_handler = &proc_dointvec,
58327 + },
58328 +#endif
58329 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58330 + {
58331 + .procname = "socket_all",
58332 + .data = &grsec_enable_socket_all,
58333 + .maxlen = sizeof(int),
58334 + .mode = 0600,
58335 + .proc_handler = &proc_dointvec,
58336 + },
58337 + {
58338 + .procname = "socket_all_gid",
58339 + .data = &grsec_socket_all_gid,
58340 + .maxlen = sizeof(int),
58341 + .mode = 0600,
58342 + .proc_handler = &proc_dointvec,
58343 + },
58344 +#endif
58345 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58346 + {
58347 + .procname = "socket_client",
58348 + .data = &grsec_enable_socket_client,
58349 + .maxlen = sizeof(int),
58350 + .mode = 0600,
58351 + .proc_handler = &proc_dointvec,
58352 + },
58353 + {
58354 + .procname = "socket_client_gid",
58355 + .data = &grsec_socket_client_gid,
58356 + .maxlen = sizeof(int),
58357 + .mode = 0600,
58358 + .proc_handler = &proc_dointvec,
58359 + },
58360 +#endif
58361 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58362 + {
58363 + .procname = "socket_server",
58364 + .data = &grsec_enable_socket_server,
58365 + .maxlen = sizeof(int),
58366 + .mode = 0600,
58367 + .proc_handler = &proc_dointvec,
58368 + },
58369 + {
58370 + .procname = "socket_server_gid",
58371 + .data = &grsec_socket_server_gid,
58372 + .maxlen = sizeof(int),
58373 + .mode = 0600,
58374 + .proc_handler = &proc_dointvec,
58375 + },
58376 +#endif
58377 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58378 + {
58379 + .procname = "audit_group",
58380 + .data = &grsec_enable_group,
58381 + .maxlen = sizeof(int),
58382 + .mode = 0600,
58383 + .proc_handler = &proc_dointvec,
58384 + },
58385 + {
58386 + .procname = "audit_gid",
58387 + .data = &grsec_audit_gid,
58388 + .maxlen = sizeof(int),
58389 + .mode = 0600,
58390 + .proc_handler = &proc_dointvec,
58391 + },
58392 +#endif
58393 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58394 + {
58395 + .procname = "audit_chdir",
58396 + .data = &grsec_enable_chdir,
58397 + .maxlen = sizeof(int),
58398 + .mode = 0600,
58399 + .proc_handler = &proc_dointvec,
58400 + },
58401 +#endif
58402 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58403 + {
58404 + .procname = "audit_mount",
58405 + .data = &grsec_enable_mount,
58406 + .maxlen = sizeof(int),
58407 + .mode = 0600,
58408 + .proc_handler = &proc_dointvec,
58409 + },
58410 +#endif
58411 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58412 + {
58413 + .procname = "audit_textrel",
58414 + .data = &grsec_enable_audit_textrel,
58415 + .maxlen = sizeof(int),
58416 + .mode = 0600,
58417 + .proc_handler = &proc_dointvec,
58418 + },
58419 +#endif
58420 +#ifdef CONFIG_GRKERNSEC_DMESG
58421 + {
58422 + .procname = "dmesg",
58423 + .data = &grsec_enable_dmesg,
58424 + .maxlen = sizeof(int),
58425 + .mode = 0600,
58426 + .proc_handler = &proc_dointvec,
58427 + },
58428 +#endif
58429 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58430 + {
58431 + .procname = "chroot_findtask",
58432 + .data = &grsec_enable_chroot_findtask,
58433 + .maxlen = sizeof(int),
58434 + .mode = 0600,
58435 + .proc_handler = &proc_dointvec,
58436 + },
58437 +#endif
58438 +#ifdef CONFIG_GRKERNSEC_RESLOG
58439 + {
58440 + .procname = "resource_logging",
58441 + .data = &grsec_resource_logging,
58442 + .maxlen = sizeof(int),
58443 + .mode = 0600,
58444 + .proc_handler = &proc_dointvec,
58445 + },
58446 +#endif
58447 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58448 + {
58449 + .procname = "audit_ptrace",
58450 + .data = &grsec_enable_audit_ptrace,
58451 + .maxlen = sizeof(int),
58452 + .mode = 0600,
58453 + .proc_handler = &proc_dointvec,
58454 + },
58455 +#endif
58456 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58457 + {
58458 + .procname = "harden_ptrace",
58459 + .data = &grsec_enable_harden_ptrace,
58460 + .maxlen = sizeof(int),
58461 + .mode = 0600,
58462 + .proc_handler = &proc_dointvec,
58463 + },
58464 +#endif
58465 + {
58466 + .procname = "grsec_lock",
58467 + .data = &grsec_lock,
58468 + .maxlen = sizeof(int),
58469 + .mode = 0600,
58470 + .proc_handler = &proc_dointvec,
58471 + },
58472 +#endif
58473 +#ifdef CONFIG_GRKERNSEC_ROFS
58474 + {
58475 + .procname = "romount_protect",
58476 + .data = &grsec_enable_rofs,
58477 + .maxlen = sizeof(int),
58478 + .mode = 0600,
58479 + .proc_handler = &proc_dointvec_minmax,
58480 + .extra1 = &one,
58481 + .extra2 = &one,
58482 + },
58483 +#endif
58484 + { }
58485 +};
58486 +#endif
58487 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58488 new file mode 100644
58489 index 0000000..0dc13c3
58490 --- /dev/null
58491 +++ b/grsecurity/grsec_time.c
58492 @@ -0,0 +1,16 @@
58493 +#include <linux/kernel.h>
58494 +#include <linux/sched.h>
58495 +#include <linux/grinternal.h>
58496 +#include <linux/module.h>
58497 +
58498 +void
58499 +gr_log_timechange(void)
58500 +{
58501 +#ifdef CONFIG_GRKERNSEC_TIME
58502 + if (grsec_enable_time)
58503 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58504 +#endif
58505 + return;
58506 +}
58507 +
58508 +EXPORT_SYMBOL(gr_log_timechange);
58509 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58510 new file mode 100644
58511 index 0000000..07e0dc0
58512 --- /dev/null
58513 +++ b/grsecurity/grsec_tpe.c
58514 @@ -0,0 +1,73 @@
58515 +#include <linux/kernel.h>
58516 +#include <linux/sched.h>
58517 +#include <linux/file.h>
58518 +#include <linux/fs.h>
58519 +#include <linux/grinternal.h>
58520 +
58521 +extern int gr_acl_tpe_check(void);
58522 +
58523 +int
58524 +gr_tpe_allow(const struct file *file)
58525 +{
58526 +#ifdef CONFIG_GRKERNSEC
58527 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58528 + const struct cred *cred = current_cred();
58529 + char *msg = NULL;
58530 + char *msg2 = NULL;
58531 +
58532 + // never restrict root
58533 + if (!cred->uid)
58534 + return 1;
58535 +
58536 + if (grsec_enable_tpe) {
58537 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58538 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58539 + msg = "not being in trusted group";
58540 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58541 + msg = "being in untrusted group";
58542 +#else
58543 + if (in_group_p(grsec_tpe_gid))
58544 + msg = "being in untrusted group";
58545 +#endif
58546 + }
58547 + if (!msg && gr_acl_tpe_check())
58548 + msg = "being in untrusted role";
58549 +
58550 + // not in any affected group/role
58551 + if (!msg)
58552 + goto next_check;
58553 +
58554 + if (inode->i_uid)
58555 + msg2 = "file in non-root-owned directory";
58556 + else if (inode->i_mode & S_IWOTH)
58557 + msg2 = "file in world-writable directory";
58558 + else if (inode->i_mode & S_IWGRP)
58559 + msg2 = "file in group-writable directory";
58560 +
58561 + if (msg && msg2) {
58562 + char fullmsg[70] = {0};
58563 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58564 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58565 + return 0;
58566 + }
58567 + msg = NULL;
58568 +next_check:
58569 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58570 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58571 + return 1;
58572 +
58573 + if (inode->i_uid && (inode->i_uid != cred->uid))
58574 + msg = "directory not owned by user";
58575 + else if (inode->i_mode & S_IWOTH)
58576 + msg = "file in world-writable directory";
58577 + else if (inode->i_mode & S_IWGRP)
58578 + msg = "file in group-writable directory";
58579 +
58580 + if (msg) {
58581 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58582 + return 0;
58583 + }
58584 +#endif
58585 +#endif
58586 + return 1;
58587 +}
58588 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58589 new file mode 100644
58590 index 0000000..9f7b1ac
58591 --- /dev/null
58592 +++ b/grsecurity/grsum.c
58593 @@ -0,0 +1,61 @@
58594 +#include <linux/err.h>
58595 +#include <linux/kernel.h>
58596 +#include <linux/sched.h>
58597 +#include <linux/mm.h>
58598 +#include <linux/scatterlist.h>
58599 +#include <linux/crypto.h>
58600 +#include <linux/gracl.h>
58601 +
58602 +
58603 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58604 +#error "crypto and sha256 must be built into the kernel"
58605 +#endif
58606 +
58607 +int
58608 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58609 +{
58610 + char *p;
58611 + struct crypto_hash *tfm;
58612 + struct hash_desc desc;
58613 + struct scatterlist sg;
58614 + unsigned char temp_sum[GR_SHA_LEN];
58615 + volatile int retval = 0;
58616 + volatile int dummy = 0;
58617 + unsigned int i;
58618 +
58619 + sg_init_table(&sg, 1);
58620 +
58621 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58622 + if (IS_ERR(tfm)) {
58623 + /* should never happen, since sha256 should be built in */
58624 + return 1;
58625 + }
58626 +
58627 + desc.tfm = tfm;
58628 + desc.flags = 0;
58629 +
58630 + crypto_hash_init(&desc);
58631 +
58632 + p = salt;
58633 + sg_set_buf(&sg, p, GR_SALT_LEN);
58634 + crypto_hash_update(&desc, &sg, sg.length);
58635 +
58636 + p = entry->pw;
58637 + sg_set_buf(&sg, p, strlen(p));
58638 +
58639 + crypto_hash_update(&desc, &sg, sg.length);
58640 +
58641 + crypto_hash_final(&desc, temp_sum);
58642 +
58643 + memset(entry->pw, 0, GR_PW_LEN);
58644 +
58645 + for (i = 0; i < GR_SHA_LEN; i++)
58646 + if (sum[i] != temp_sum[i])
58647 + retval = 1;
58648 + else
58649 + dummy = 1; // waste a cycle
58650 +
58651 + crypto_free_hash(tfm);
58652 +
58653 + return retval;
58654 +}
58655 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58656 index 6cd5b64..f620d2d 100644
58657 --- a/include/acpi/acpi_bus.h
58658 +++ b/include/acpi/acpi_bus.h
58659 @@ -107,7 +107,7 @@ struct acpi_device_ops {
58660 acpi_op_bind bind;
58661 acpi_op_unbind unbind;
58662 acpi_op_notify notify;
58663 -};
58664 +} __no_const;
58665
58666 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58667
58668 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58669 index b7babf0..71e4e74 100644
58670 --- a/include/asm-generic/atomic-long.h
58671 +++ b/include/asm-generic/atomic-long.h
58672 @@ -22,6 +22,12 @@
58673
58674 typedef atomic64_t atomic_long_t;
58675
58676 +#ifdef CONFIG_PAX_REFCOUNT
58677 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
58678 +#else
58679 +typedef atomic64_t atomic_long_unchecked_t;
58680 +#endif
58681 +
58682 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58683
58684 static inline long atomic_long_read(atomic_long_t *l)
58685 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58686 return (long)atomic64_read(v);
58687 }
58688
58689 +#ifdef CONFIG_PAX_REFCOUNT
58690 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58691 +{
58692 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58693 +
58694 + return (long)atomic64_read_unchecked(v);
58695 +}
58696 +#endif
58697 +
58698 static inline void atomic_long_set(atomic_long_t *l, long i)
58699 {
58700 atomic64_t *v = (atomic64_t *)l;
58701 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58702 atomic64_set(v, i);
58703 }
58704
58705 +#ifdef CONFIG_PAX_REFCOUNT
58706 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58707 +{
58708 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58709 +
58710 + atomic64_set_unchecked(v, i);
58711 +}
58712 +#endif
58713 +
58714 static inline void atomic_long_inc(atomic_long_t *l)
58715 {
58716 atomic64_t *v = (atomic64_t *)l;
58717 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58718 atomic64_inc(v);
58719 }
58720
58721 +#ifdef CONFIG_PAX_REFCOUNT
58722 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58723 +{
58724 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58725 +
58726 + atomic64_inc_unchecked(v);
58727 +}
58728 +#endif
58729 +
58730 static inline void atomic_long_dec(atomic_long_t *l)
58731 {
58732 atomic64_t *v = (atomic64_t *)l;
58733 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58734 atomic64_dec(v);
58735 }
58736
58737 +#ifdef CONFIG_PAX_REFCOUNT
58738 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58739 +{
58740 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58741 +
58742 + atomic64_dec_unchecked(v);
58743 +}
58744 +#endif
58745 +
58746 static inline void atomic_long_add(long i, atomic_long_t *l)
58747 {
58748 atomic64_t *v = (atomic64_t *)l;
58749 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58750 atomic64_add(i, v);
58751 }
58752
58753 +#ifdef CONFIG_PAX_REFCOUNT
58754 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58755 +{
58756 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58757 +
58758 + atomic64_add_unchecked(i, v);
58759 +}
58760 +#endif
58761 +
58762 static inline void atomic_long_sub(long i, atomic_long_t *l)
58763 {
58764 atomic64_t *v = (atomic64_t *)l;
58765 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58766 atomic64_sub(i, v);
58767 }
58768
58769 +#ifdef CONFIG_PAX_REFCOUNT
58770 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58771 +{
58772 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58773 +
58774 + atomic64_sub_unchecked(i, v);
58775 +}
58776 +#endif
58777 +
58778 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58779 {
58780 atomic64_t *v = (atomic64_t *)l;
58781 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58782 return (long)atomic64_inc_return(v);
58783 }
58784
58785 +#ifdef CONFIG_PAX_REFCOUNT
58786 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58787 +{
58788 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58789 +
58790 + return (long)atomic64_inc_return_unchecked(v);
58791 +}
58792 +#endif
58793 +
58794 static inline long atomic_long_dec_return(atomic_long_t *l)
58795 {
58796 atomic64_t *v = (atomic64_t *)l;
58797 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58798
58799 typedef atomic_t atomic_long_t;
58800
58801 +#ifdef CONFIG_PAX_REFCOUNT
58802 +typedef atomic_unchecked_t atomic_long_unchecked_t;
58803 +#else
58804 +typedef atomic_t atomic_long_unchecked_t;
58805 +#endif
58806 +
58807 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58808 static inline long atomic_long_read(atomic_long_t *l)
58809 {
58810 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58811 return (long)atomic_read(v);
58812 }
58813
58814 +#ifdef CONFIG_PAX_REFCOUNT
58815 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58816 +{
58817 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58818 +
58819 + return (long)atomic_read_unchecked(v);
58820 +}
58821 +#endif
58822 +
58823 static inline void atomic_long_set(atomic_long_t *l, long i)
58824 {
58825 atomic_t *v = (atomic_t *)l;
58826 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58827 atomic_set(v, i);
58828 }
58829
58830 +#ifdef CONFIG_PAX_REFCOUNT
58831 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58832 +{
58833 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58834 +
58835 + atomic_set_unchecked(v, i);
58836 +}
58837 +#endif
58838 +
58839 static inline void atomic_long_inc(atomic_long_t *l)
58840 {
58841 atomic_t *v = (atomic_t *)l;
58842 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58843 atomic_inc(v);
58844 }
58845
58846 +#ifdef CONFIG_PAX_REFCOUNT
58847 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58848 +{
58849 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58850 +
58851 + atomic_inc_unchecked(v);
58852 +}
58853 +#endif
58854 +
58855 static inline void atomic_long_dec(atomic_long_t *l)
58856 {
58857 atomic_t *v = (atomic_t *)l;
58858 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58859 atomic_dec(v);
58860 }
58861
58862 +#ifdef CONFIG_PAX_REFCOUNT
58863 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58864 +{
58865 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58866 +
58867 + atomic_dec_unchecked(v);
58868 +}
58869 +#endif
58870 +
58871 static inline void atomic_long_add(long i, atomic_long_t *l)
58872 {
58873 atomic_t *v = (atomic_t *)l;
58874 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58875 atomic_add(i, v);
58876 }
58877
58878 +#ifdef CONFIG_PAX_REFCOUNT
58879 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58880 +{
58881 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58882 +
58883 + atomic_add_unchecked(i, v);
58884 +}
58885 +#endif
58886 +
58887 static inline void atomic_long_sub(long i, atomic_long_t *l)
58888 {
58889 atomic_t *v = (atomic_t *)l;
58890 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58891 atomic_sub(i, v);
58892 }
58893
58894 +#ifdef CONFIG_PAX_REFCOUNT
58895 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58896 +{
58897 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58898 +
58899 + atomic_sub_unchecked(i, v);
58900 +}
58901 +#endif
58902 +
58903 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58904 {
58905 atomic_t *v = (atomic_t *)l;
58906 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58907 return (long)atomic_inc_return(v);
58908 }
58909
58910 +#ifdef CONFIG_PAX_REFCOUNT
58911 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58912 +{
58913 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58914 +
58915 + return (long)atomic_inc_return_unchecked(v);
58916 +}
58917 +#endif
58918 +
58919 static inline long atomic_long_dec_return(atomic_long_t *l)
58920 {
58921 atomic_t *v = (atomic_t *)l;
58922 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58923
58924 #endif /* BITS_PER_LONG == 64 */
58925
58926 +#ifdef CONFIG_PAX_REFCOUNT
58927 +static inline void pax_refcount_needs_these_functions(void)
58928 +{
58929 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
58930 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
58931 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
58932 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
58933 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
58934 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
58935 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
58936 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
58937 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
58938 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
58939 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
58940 +
58941 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
58942 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
58943 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
58944 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
58945 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
58946 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
58947 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
58948 +}
58949 +#else
58950 +#define atomic_read_unchecked(v) atomic_read(v)
58951 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
58952 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
58953 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
58954 +#define atomic_inc_unchecked(v) atomic_inc(v)
58955 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
58956 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
58957 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
58958 +#define atomic_dec_unchecked(v) atomic_dec(v)
58959 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
58960 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
58961 +
58962 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
58963 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
58964 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
58965 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
58966 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
58967 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
58968 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
58969 +#endif
58970 +
58971 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
58972 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
58973 index b18ce4f..2ee2843 100644
58974 --- a/include/asm-generic/atomic64.h
58975 +++ b/include/asm-generic/atomic64.h
58976 @@ -16,6 +16,8 @@ typedef struct {
58977 long long counter;
58978 } atomic64_t;
58979
58980 +typedef atomic64_t atomic64_unchecked_t;
58981 +
58982 #define ATOMIC64_INIT(i) { (i) }
58983
58984 extern long long atomic64_read(const atomic64_t *v);
58985 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
58986 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
58987 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
58988
58989 +#define atomic64_read_unchecked(v) atomic64_read(v)
58990 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
58991 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
58992 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
58993 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
58994 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
58995 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
58996 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
58997 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
58998 +
58999 #endif /* _ASM_GENERIC_ATOMIC64_H */
59000 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59001 index 1bfcfe5..e04c5c9 100644
59002 --- a/include/asm-generic/cache.h
59003 +++ b/include/asm-generic/cache.h
59004 @@ -6,7 +6,7 @@
59005 * cache lines need to provide their own cache.h.
59006 */
59007
59008 -#define L1_CACHE_SHIFT 5
59009 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59010 +#define L1_CACHE_SHIFT 5UL
59011 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59012
59013 #endif /* __ASM_GENERIC_CACHE_H */
59014 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59015 index 0d68a1e..b74a761 100644
59016 --- a/include/asm-generic/emergency-restart.h
59017 +++ b/include/asm-generic/emergency-restart.h
59018 @@ -1,7 +1,7 @@
59019 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59020 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59021
59022 -static inline void machine_emergency_restart(void)
59023 +static inline __noreturn void machine_emergency_restart(void)
59024 {
59025 machine_restart(NULL);
59026 }
59027 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
59028 index 1ca3efc..e3dc852 100644
59029 --- a/include/asm-generic/int-l64.h
59030 +++ b/include/asm-generic/int-l64.h
59031 @@ -46,6 +46,8 @@ typedef unsigned int u32;
59032 typedef signed long s64;
59033 typedef unsigned long u64;
59034
59035 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
59036 +
59037 #define S8_C(x) x
59038 #define U8_C(x) x ## U
59039 #define S16_C(x) x
59040 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
59041 index f394147..b6152b9 100644
59042 --- a/include/asm-generic/int-ll64.h
59043 +++ b/include/asm-generic/int-ll64.h
59044 @@ -51,6 +51,8 @@ typedef unsigned int u32;
59045 typedef signed long long s64;
59046 typedef unsigned long long u64;
59047
59048 +typedef unsigned long long intoverflow_t;
59049 +
59050 #define S8_C(x) x
59051 #define U8_C(x) x ## U
59052 #define S16_C(x) x
59053 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59054 index 0232ccb..13d9165 100644
59055 --- a/include/asm-generic/kmap_types.h
59056 +++ b/include/asm-generic/kmap_types.h
59057 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59058 KMAP_D(17) KM_NMI,
59059 KMAP_D(18) KM_NMI_PTE,
59060 KMAP_D(19) KM_KDB,
59061 +KMAP_D(20) KM_CLEARPAGE,
59062 /*
59063 * Remember to update debug_kmap_atomic() when adding new kmap types!
59064 */
59065 -KMAP_D(20) KM_TYPE_NR
59066 +KMAP_D(21) KM_TYPE_NR
59067 };
59068
59069 #undef KMAP_D
59070 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59071 index 725612b..9cc513a 100644
59072 --- a/include/asm-generic/pgtable-nopmd.h
59073 +++ b/include/asm-generic/pgtable-nopmd.h
59074 @@ -1,14 +1,19 @@
59075 #ifndef _PGTABLE_NOPMD_H
59076 #define _PGTABLE_NOPMD_H
59077
59078 -#ifndef __ASSEMBLY__
59079 -
59080 #include <asm-generic/pgtable-nopud.h>
59081
59082 -struct mm_struct;
59083 -
59084 #define __PAGETABLE_PMD_FOLDED
59085
59086 +#define PMD_SHIFT PUD_SHIFT
59087 +#define PTRS_PER_PMD 1
59088 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59089 +#define PMD_MASK (~(PMD_SIZE-1))
59090 +
59091 +#ifndef __ASSEMBLY__
59092 +
59093 +struct mm_struct;
59094 +
59095 /*
59096 * Having the pmd type consist of a pud gets the size right, and allows
59097 * us to conceptually access the pud entry that this pmd is folded into
59098 @@ -16,11 +21,6 @@ struct mm_struct;
59099 */
59100 typedef struct { pud_t pud; } pmd_t;
59101
59102 -#define PMD_SHIFT PUD_SHIFT
59103 -#define PTRS_PER_PMD 1
59104 -#define PMD_SIZE (1UL << PMD_SHIFT)
59105 -#define PMD_MASK (~(PMD_SIZE-1))
59106 -
59107 /*
59108 * The "pud_xxx()" functions here are trivial for a folded two-level
59109 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59110 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59111 index 810431d..ccc3638 100644
59112 --- a/include/asm-generic/pgtable-nopud.h
59113 +++ b/include/asm-generic/pgtable-nopud.h
59114 @@ -1,10 +1,15 @@
59115 #ifndef _PGTABLE_NOPUD_H
59116 #define _PGTABLE_NOPUD_H
59117
59118 -#ifndef __ASSEMBLY__
59119 -
59120 #define __PAGETABLE_PUD_FOLDED
59121
59122 +#define PUD_SHIFT PGDIR_SHIFT
59123 +#define PTRS_PER_PUD 1
59124 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59125 +#define PUD_MASK (~(PUD_SIZE-1))
59126 +
59127 +#ifndef __ASSEMBLY__
59128 +
59129 /*
59130 * Having the pud type consist of a pgd gets the size right, and allows
59131 * us to conceptually access the pgd entry that this pud is folded into
59132 @@ -12,11 +17,6 @@
59133 */
59134 typedef struct { pgd_t pgd; } pud_t;
59135
59136 -#define PUD_SHIFT PGDIR_SHIFT
59137 -#define PTRS_PER_PUD 1
59138 -#define PUD_SIZE (1UL << PUD_SHIFT)
59139 -#define PUD_MASK (~(PUD_SIZE-1))
59140 -
59141 /*
59142 * The "pgd_xxx()" functions here are trivial for a folded two-level
59143 * setup: the pud is never bad, and a pud always exists (as it's folded
59144 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59145 index 76bff2b..c7a14e2 100644
59146 --- a/include/asm-generic/pgtable.h
59147 +++ b/include/asm-generic/pgtable.h
59148 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
59149 #endif /* __HAVE_ARCH_PMD_WRITE */
59150 #endif
59151
59152 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59153 +static inline unsigned long pax_open_kernel(void) { return 0; }
59154 +#endif
59155 +
59156 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59157 +static inline unsigned long pax_close_kernel(void) { return 0; }
59158 +#endif
59159 +
59160 #endif /* !__ASSEMBLY__ */
59161
59162 #endif /* _ASM_GENERIC_PGTABLE_H */
59163 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59164 index b5e2e4c..6a5373e 100644
59165 --- a/include/asm-generic/vmlinux.lds.h
59166 +++ b/include/asm-generic/vmlinux.lds.h
59167 @@ -217,6 +217,7 @@
59168 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59169 VMLINUX_SYMBOL(__start_rodata) = .; \
59170 *(.rodata) *(.rodata.*) \
59171 + *(.data..read_only) \
59172 *(__vermagic) /* Kernel version magic */ \
59173 . = ALIGN(8); \
59174 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59175 @@ -722,17 +723,18 @@
59176 * section in the linker script will go there too. @phdr should have
59177 * a leading colon.
59178 *
59179 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59180 + * Note that this macros defines per_cpu_load as an absolute symbol.
59181 * If there is no need to put the percpu section at a predetermined
59182 * address, use PERCPU_SECTION.
59183 */
59184 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59185 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59186 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59187 + per_cpu_load = .; \
59188 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59189 - LOAD_OFFSET) { \
59190 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59191 PERCPU_INPUT(cacheline) \
59192 } phdr \
59193 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59194 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59195
59196 /**
59197 * PERCPU_SECTION - define output section for percpu area, simple version
59198 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59199 index bf4b2dc..2d0762f 100644
59200 --- a/include/drm/drmP.h
59201 +++ b/include/drm/drmP.h
59202 @@ -72,6 +72,7 @@
59203 #include <linux/workqueue.h>
59204 #include <linux/poll.h>
59205 #include <asm/pgalloc.h>
59206 +#include <asm/local.h>
59207 #include "drm.h"
59208
59209 #include <linux/idr.h>
59210 @@ -1038,7 +1039,7 @@ struct drm_device {
59211
59212 /** \name Usage Counters */
59213 /*@{ */
59214 - int open_count; /**< Outstanding files open */
59215 + local_t open_count; /**< Outstanding files open */
59216 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59217 atomic_t vma_count; /**< Outstanding vma areas open */
59218 int buf_use; /**< Buffers in use -- cannot alloc */
59219 @@ -1049,7 +1050,7 @@ struct drm_device {
59220 /*@{ */
59221 unsigned long counters;
59222 enum drm_stat_type types[15];
59223 - atomic_t counts[15];
59224 + atomic_unchecked_t counts[15];
59225 /*@} */
59226
59227 struct list_head filelist;
59228 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59229 index 73b0712..0b7ef2f 100644
59230 --- a/include/drm/drm_crtc_helper.h
59231 +++ b/include/drm/drm_crtc_helper.h
59232 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59233
59234 /* disable crtc when not in use - more explicit than dpms off */
59235 void (*disable)(struct drm_crtc *crtc);
59236 -};
59237 +} __no_const;
59238
59239 struct drm_encoder_helper_funcs {
59240 void (*dpms)(struct drm_encoder *encoder, int mode);
59241 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59242 struct drm_connector *connector);
59243 /* disable encoder when not in use - more explicit than dpms off */
59244 void (*disable)(struct drm_encoder *encoder);
59245 -};
59246 +} __no_const;
59247
59248 struct drm_connector_helper_funcs {
59249 int (*get_modes)(struct drm_connector *connector);
59250 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59251 index 26c1f78..6722682 100644
59252 --- a/include/drm/ttm/ttm_memory.h
59253 +++ b/include/drm/ttm/ttm_memory.h
59254 @@ -47,7 +47,7 @@
59255
59256 struct ttm_mem_shrink {
59257 int (*do_shrink) (struct ttm_mem_shrink *);
59258 -};
59259 +} __no_const;
59260
59261 /**
59262 * struct ttm_mem_global - Global memory accounting structure.
59263 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59264 index e86dfca..40cc55f 100644
59265 --- a/include/linux/a.out.h
59266 +++ b/include/linux/a.out.h
59267 @@ -39,6 +39,14 @@ enum machine_type {
59268 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59269 };
59270
59271 +/* Constants for the N_FLAGS field */
59272 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59273 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59274 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59275 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59276 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59277 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59278 +
59279 #if !defined (N_MAGIC)
59280 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59281 #endif
59282 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59283 index 49a83ca..df96b54 100644
59284 --- a/include/linux/atmdev.h
59285 +++ b/include/linux/atmdev.h
59286 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59287 #endif
59288
59289 struct k_atm_aal_stats {
59290 -#define __HANDLE_ITEM(i) atomic_t i
59291 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59292 __AAL_STAT_ITEMS
59293 #undef __HANDLE_ITEM
59294 };
59295 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59296 index fd88a39..8a801b4 100644
59297 --- a/include/linux/binfmts.h
59298 +++ b/include/linux/binfmts.h
59299 @@ -18,7 +18,7 @@ struct pt_regs;
59300 #define BINPRM_BUF_SIZE 128
59301
59302 #ifdef __KERNEL__
59303 -#include <linux/list.h>
59304 +#include <linux/sched.h>
59305
59306 #define CORENAME_MAX_SIZE 128
59307
59308 @@ -58,6 +58,7 @@ struct linux_binprm {
59309 unsigned interp_flags;
59310 unsigned interp_data;
59311 unsigned long loader, exec;
59312 + char tcomm[TASK_COMM_LEN];
59313 };
59314
59315 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
59316 @@ -88,6 +89,7 @@ struct linux_binfmt {
59317 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59318 int (*load_shlib)(struct file *);
59319 int (*core_dump)(struct coredump_params *cprm);
59320 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59321 unsigned long min_coredump; /* minimal dump size */
59322 };
59323
59324 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59325 index 0ed1eb0..3ab569b 100644
59326 --- a/include/linux/blkdev.h
59327 +++ b/include/linux/blkdev.h
59328 @@ -1315,7 +1315,7 @@ struct block_device_operations {
59329 /* this callback is with swap_lock and sometimes page table lock held */
59330 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59331 struct module *owner;
59332 -};
59333 +} __do_const;
59334
59335 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59336 unsigned long);
59337 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59338 index 4d1a074..88f929a 100644
59339 --- a/include/linux/blktrace_api.h
59340 +++ b/include/linux/blktrace_api.h
59341 @@ -162,7 +162,7 @@ struct blk_trace {
59342 struct dentry *dir;
59343 struct dentry *dropped_file;
59344 struct dentry *msg_file;
59345 - atomic_t dropped;
59346 + atomic_unchecked_t dropped;
59347 };
59348
59349 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59350 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59351 index 83195fb..0b0f77d 100644
59352 --- a/include/linux/byteorder/little_endian.h
59353 +++ b/include/linux/byteorder/little_endian.h
59354 @@ -42,51 +42,51 @@
59355
59356 static inline __le64 __cpu_to_le64p(const __u64 *p)
59357 {
59358 - return (__force __le64)*p;
59359 + return (__force const __le64)*p;
59360 }
59361 static inline __u64 __le64_to_cpup(const __le64 *p)
59362 {
59363 - return (__force __u64)*p;
59364 + return (__force const __u64)*p;
59365 }
59366 static inline __le32 __cpu_to_le32p(const __u32 *p)
59367 {
59368 - return (__force __le32)*p;
59369 + return (__force const __le32)*p;
59370 }
59371 static inline __u32 __le32_to_cpup(const __le32 *p)
59372 {
59373 - return (__force __u32)*p;
59374 + return (__force const __u32)*p;
59375 }
59376 static inline __le16 __cpu_to_le16p(const __u16 *p)
59377 {
59378 - return (__force __le16)*p;
59379 + return (__force const __le16)*p;
59380 }
59381 static inline __u16 __le16_to_cpup(const __le16 *p)
59382 {
59383 - return (__force __u16)*p;
59384 + return (__force const __u16)*p;
59385 }
59386 static inline __be64 __cpu_to_be64p(const __u64 *p)
59387 {
59388 - return (__force __be64)__swab64p(p);
59389 + return (__force const __be64)__swab64p(p);
59390 }
59391 static inline __u64 __be64_to_cpup(const __be64 *p)
59392 {
59393 - return __swab64p((__u64 *)p);
59394 + return __swab64p((const __u64 *)p);
59395 }
59396 static inline __be32 __cpu_to_be32p(const __u32 *p)
59397 {
59398 - return (__force __be32)__swab32p(p);
59399 + return (__force const __be32)__swab32p(p);
59400 }
59401 static inline __u32 __be32_to_cpup(const __be32 *p)
59402 {
59403 - return __swab32p((__u32 *)p);
59404 + return __swab32p((const __u32 *)p);
59405 }
59406 static inline __be16 __cpu_to_be16p(const __u16 *p)
59407 {
59408 - return (__force __be16)__swab16p(p);
59409 + return (__force const __be16)__swab16p(p);
59410 }
59411 static inline __u16 __be16_to_cpup(const __be16 *p)
59412 {
59413 - return __swab16p((__u16 *)p);
59414 + return __swab16p((const __u16 *)p);
59415 }
59416 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59417 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59418 diff --git a/include/linux/cache.h b/include/linux/cache.h
59419 index 4c57065..4307975 100644
59420 --- a/include/linux/cache.h
59421 +++ b/include/linux/cache.h
59422 @@ -16,6 +16,10 @@
59423 #define __read_mostly
59424 #endif
59425
59426 +#ifndef __read_only
59427 +#define __read_only __read_mostly
59428 +#endif
59429 +
59430 #ifndef ____cacheline_aligned
59431 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59432 #endif
59433 diff --git a/include/linux/capability.h b/include/linux/capability.h
59434 index a63d13d..069bfd5 100644
59435 --- a/include/linux/capability.h
59436 +++ b/include/linux/capability.h
59437 @@ -548,6 +548,9 @@ extern bool capable(int cap);
59438 extern bool ns_capable(struct user_namespace *ns, int cap);
59439 extern bool task_ns_capable(struct task_struct *t, int cap);
59440 extern bool nsown_capable(int cap);
59441 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
59442 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59443 +extern bool capable_nolog(int cap);
59444
59445 /* audit system wants to get cap info from files as well */
59446 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59447 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59448 index 04ffb2e..6799180 100644
59449 --- a/include/linux/cleancache.h
59450 +++ b/include/linux/cleancache.h
59451 @@ -31,7 +31,7 @@ struct cleancache_ops {
59452 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
59453 void (*flush_inode)(int, struct cleancache_filekey);
59454 void (*flush_fs)(int);
59455 -};
59456 +} __no_const;
59457
59458 extern struct cleancache_ops
59459 cleancache_register_ops(struct cleancache_ops *ops);
59460 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59461 index dfadc96..c0e70c1 100644
59462 --- a/include/linux/compiler-gcc4.h
59463 +++ b/include/linux/compiler-gcc4.h
59464 @@ -31,6 +31,12 @@
59465
59466
59467 #if __GNUC_MINOR__ >= 5
59468 +
59469 +#ifdef CONSTIFY_PLUGIN
59470 +#define __no_const __attribute__((no_const))
59471 +#define __do_const __attribute__((do_const))
59472 +#endif
59473 +
59474 /*
59475 * Mark a position in code as unreachable. This can be used to
59476 * suppress control flow warnings after asm blocks that transfer
59477 @@ -46,6 +52,11 @@
59478 #define __noclone __attribute__((__noclone__))
59479
59480 #endif
59481 +
59482 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59483 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59484 +#define __bos0(ptr) __bos((ptr), 0)
59485 +#define __bos1(ptr) __bos((ptr), 1)
59486 #endif
59487
59488 #if __GNUC_MINOR__ > 0
59489 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59490 index 320d6c9..8573a1c 100644
59491 --- a/include/linux/compiler.h
59492 +++ b/include/linux/compiler.h
59493 @@ -5,31 +5,62 @@
59494
59495 #ifdef __CHECKER__
59496 # define __user __attribute__((noderef, address_space(1)))
59497 +# define __force_user __force __user
59498 # define __kernel __attribute__((address_space(0)))
59499 +# define __force_kernel __force __kernel
59500 # define __safe __attribute__((safe))
59501 # define __force __attribute__((force))
59502 # define __nocast __attribute__((nocast))
59503 # define __iomem __attribute__((noderef, address_space(2)))
59504 +# define __force_iomem __force __iomem
59505 # define __acquires(x) __attribute__((context(x,0,1)))
59506 # define __releases(x) __attribute__((context(x,1,0)))
59507 # define __acquire(x) __context__(x,1)
59508 # define __release(x) __context__(x,-1)
59509 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59510 # define __percpu __attribute__((noderef, address_space(3)))
59511 +# define __force_percpu __force __percpu
59512 #ifdef CONFIG_SPARSE_RCU_POINTER
59513 # define __rcu __attribute__((noderef, address_space(4)))
59514 +# define __force_rcu __force __rcu
59515 #else
59516 # define __rcu
59517 +# define __force_rcu
59518 #endif
59519 extern void __chk_user_ptr(const volatile void __user *);
59520 extern void __chk_io_ptr(const volatile void __iomem *);
59521 +#elif defined(CHECKER_PLUGIN)
59522 +//# define __user
59523 +//# define __force_user
59524 +//# define __kernel
59525 +//# define __force_kernel
59526 +# define __safe
59527 +# define __force
59528 +# define __nocast
59529 +# define __iomem
59530 +# define __force_iomem
59531 +# define __chk_user_ptr(x) (void)0
59532 +# define __chk_io_ptr(x) (void)0
59533 +# define __builtin_warning(x, y...) (1)
59534 +# define __acquires(x)
59535 +# define __releases(x)
59536 +# define __acquire(x) (void)0
59537 +# define __release(x) (void)0
59538 +# define __cond_lock(x,c) (c)
59539 +# define __percpu
59540 +# define __force_percpu
59541 +# define __rcu
59542 +# define __force_rcu
59543 #else
59544 # define __user
59545 +# define __force_user
59546 # define __kernel
59547 +# define __force_kernel
59548 # define __safe
59549 # define __force
59550 # define __nocast
59551 # define __iomem
59552 +# define __force_iomem
59553 # define __chk_user_ptr(x) (void)0
59554 # define __chk_io_ptr(x) (void)0
59555 # define __builtin_warning(x, y...) (1)
59556 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59557 # define __release(x) (void)0
59558 # define __cond_lock(x,c) (c)
59559 # define __percpu
59560 +# define __force_percpu
59561 # define __rcu
59562 +# define __force_rcu
59563 #endif
59564
59565 #ifdef __KERNEL__
59566 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59567 # define __attribute_const__ /* unimplemented */
59568 #endif
59569
59570 +#ifndef __no_const
59571 +# define __no_const
59572 +#endif
59573 +
59574 +#ifndef __do_const
59575 +# define __do_const
59576 +#endif
59577 +
59578 /*
59579 * Tell gcc if a function is cold. The compiler will assume any path
59580 * directly leading to the call is unlikely.
59581 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59582 #define __cold
59583 #endif
59584
59585 +#ifndef __alloc_size
59586 +#define __alloc_size(...)
59587 +#endif
59588 +
59589 +#ifndef __bos
59590 +#define __bos(ptr, arg)
59591 +#endif
59592 +
59593 +#ifndef __bos0
59594 +#define __bos0(ptr)
59595 +#endif
59596 +
59597 +#ifndef __bos1
59598 +#define __bos1(ptr)
59599 +#endif
59600 +
59601 /* Simple shorthand for a section definition */
59602 #ifndef __section
59603 # define __section(S) __attribute__ ((__section__(#S)))
59604 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59605 * use is to mediate communication between process-level code and irq/NMI
59606 * handlers, all running on the same CPU.
59607 */
59608 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59609 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59610 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59611
59612 #endif /* __LINUX_COMPILER_H */
59613 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
59614 index e9eaec5..bfeb9bb 100644
59615 --- a/include/linux/cpuset.h
59616 +++ b/include/linux/cpuset.h
59617 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
59618 * nodemask.
59619 */
59620 smp_mb();
59621 - --ACCESS_ONCE(current->mems_allowed_change_disable);
59622 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
59623 }
59624
59625 static inline void set_mems_allowed(nodemask_t nodemask)
59626 diff --git a/include/linux/cred.h b/include/linux/cred.h
59627 index 4030896..8d6f342 100644
59628 --- a/include/linux/cred.h
59629 +++ b/include/linux/cred.h
59630 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59631 static inline void validate_process_creds(void)
59632 {
59633 }
59634 +static inline void validate_task_creds(struct task_struct *task)
59635 +{
59636 +}
59637 #endif
59638
59639 /**
59640 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59641 index 8a94217..15d49e3 100644
59642 --- a/include/linux/crypto.h
59643 +++ b/include/linux/crypto.h
59644 @@ -365,7 +365,7 @@ struct cipher_tfm {
59645 const u8 *key, unsigned int keylen);
59646 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59647 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59648 -};
59649 +} __no_const;
59650
59651 struct hash_tfm {
59652 int (*init)(struct hash_desc *desc);
59653 @@ -386,13 +386,13 @@ struct compress_tfm {
59654 int (*cot_decompress)(struct crypto_tfm *tfm,
59655 const u8 *src, unsigned int slen,
59656 u8 *dst, unsigned int *dlen);
59657 -};
59658 +} __no_const;
59659
59660 struct rng_tfm {
59661 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59662 unsigned int dlen);
59663 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59664 -};
59665 +} __no_const;
59666
59667 #define crt_ablkcipher crt_u.ablkcipher
59668 #define crt_aead crt_u.aead
59669 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59670 index 7925bf0..d5143d2 100644
59671 --- a/include/linux/decompress/mm.h
59672 +++ b/include/linux/decompress/mm.h
59673 @@ -77,7 +77,7 @@ static void free(void *where)
59674 * warnings when not needed (indeed large_malloc / large_free are not
59675 * needed by inflate */
59676
59677 -#define malloc(a) kmalloc(a, GFP_KERNEL)
59678 +#define malloc(a) kmalloc((a), GFP_KERNEL)
59679 #define free(a) kfree(a)
59680
59681 #define large_malloc(a) vmalloc(a)
59682 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59683 index e13117c..e9fc938 100644
59684 --- a/include/linux/dma-mapping.h
59685 +++ b/include/linux/dma-mapping.h
59686 @@ -46,7 +46,7 @@ struct dma_map_ops {
59687 u64 (*get_required_mask)(struct device *dev);
59688 #endif
59689 int is_phys;
59690 -};
59691 +} __do_const;
59692
59693 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59694
59695 diff --git a/include/linux/efi.h b/include/linux/efi.h
59696 index 2362a0b..cfaf8fcc 100644
59697 --- a/include/linux/efi.h
59698 +++ b/include/linux/efi.h
59699 @@ -446,7 +446,7 @@ struct efivar_operations {
59700 efi_get_variable_t *get_variable;
59701 efi_get_next_variable_t *get_next_variable;
59702 efi_set_variable_t *set_variable;
59703 -};
59704 +} __no_const;
59705
59706 struct efivars {
59707 /*
59708 diff --git a/include/linux/elf.h b/include/linux/elf.h
59709 index 31f0508..5421c01 100644
59710 --- a/include/linux/elf.h
59711 +++ b/include/linux/elf.h
59712 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
59713 #define PT_GNU_EH_FRAME 0x6474e550
59714
59715 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59716 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59717 +
59718 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59719 +
59720 +/* Constants for the e_flags field */
59721 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59722 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59723 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59724 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59725 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59726 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59727
59728 /*
59729 * Extended Numbering
59730 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
59731 #define DT_DEBUG 21
59732 #define DT_TEXTREL 22
59733 #define DT_JMPREL 23
59734 +#define DT_FLAGS 30
59735 + #define DF_TEXTREL 0x00000004
59736 #define DT_ENCODING 32
59737 #define OLD_DT_LOOS 0x60000000
59738 #define DT_LOOS 0x6000000d
59739 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
59740 #define PF_W 0x2
59741 #define PF_X 0x1
59742
59743 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59744 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59745 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59746 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59747 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59748 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59749 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59750 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59751 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59752 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59753 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59754 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59755 +
59756 typedef struct elf32_phdr{
59757 Elf32_Word p_type;
59758 Elf32_Off p_offset;
59759 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
59760 #define EI_OSABI 7
59761 #define EI_PAD 8
59762
59763 +#define EI_PAX 14
59764 +
59765 #define ELFMAG0 0x7f /* EI_MAG */
59766 #define ELFMAG1 'E'
59767 #define ELFMAG2 'L'
59768 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
59769 #define elf_note elf32_note
59770 #define elf_addr_t Elf32_Off
59771 #define Elf_Half Elf32_Half
59772 +#define elf_dyn Elf32_Dyn
59773
59774 #else
59775
59776 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
59777 #define elf_note elf64_note
59778 #define elf_addr_t Elf64_Off
59779 #define Elf_Half Elf64_Half
59780 +#define elf_dyn Elf64_Dyn
59781
59782 #endif
59783
59784 diff --git a/include/linux/filter.h b/include/linux/filter.h
59785 index 8eeb205..d59bfa2 100644
59786 --- a/include/linux/filter.h
59787 +++ b/include/linux/filter.h
59788 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
59789
59790 struct sk_buff;
59791 struct sock;
59792 +struct bpf_jit_work;
59793
59794 struct sk_filter
59795 {
59796 @@ -141,6 +142,9 @@ struct sk_filter
59797 unsigned int len; /* Number of filter blocks */
59798 unsigned int (*bpf_func)(const struct sk_buff *skb,
59799 const struct sock_filter *filter);
59800 +#ifdef CONFIG_BPF_JIT
59801 + struct bpf_jit_work *work;
59802 +#endif
59803 struct rcu_head rcu;
59804 struct sock_filter insns[0];
59805 };
59806 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
59807 index 84ccf8e..2e9b14c 100644
59808 --- a/include/linux/firewire.h
59809 +++ b/include/linux/firewire.h
59810 @@ -428,7 +428,7 @@ struct fw_iso_context {
59811 union {
59812 fw_iso_callback_t sc;
59813 fw_iso_mc_callback_t mc;
59814 - } callback;
59815 + } __no_const callback;
59816 void *callback_data;
59817 };
59818
59819 diff --git a/include/linux/fs.h b/include/linux/fs.h
59820 index 10b2288..09180e4 100644
59821 --- a/include/linux/fs.h
59822 +++ b/include/linux/fs.h
59823 @@ -1609,7 +1609,8 @@ struct file_operations {
59824 int (*setlease)(struct file *, long, struct file_lock **);
59825 long (*fallocate)(struct file *file, int mode, loff_t offset,
59826 loff_t len);
59827 -};
59828 +} __do_const;
59829 +typedef struct file_operations __no_const file_operations_no_const;
59830
59831 struct inode_operations {
59832 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
59833 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
59834 index 003dc0f..3c4ea97 100644
59835 --- a/include/linux/fs_struct.h
59836 +++ b/include/linux/fs_struct.h
59837 @@ -6,7 +6,7 @@
59838 #include <linux/seqlock.h>
59839
59840 struct fs_struct {
59841 - int users;
59842 + atomic_t users;
59843 spinlock_t lock;
59844 seqcount_t seq;
59845 int umask;
59846 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
59847 index ce31408..b1ad003 100644
59848 --- a/include/linux/fscache-cache.h
59849 +++ b/include/linux/fscache-cache.h
59850 @@ -102,7 +102,7 @@ struct fscache_operation {
59851 fscache_operation_release_t release;
59852 };
59853
59854 -extern atomic_t fscache_op_debug_id;
59855 +extern atomic_unchecked_t fscache_op_debug_id;
59856 extern void fscache_op_work_func(struct work_struct *work);
59857
59858 extern void fscache_enqueue_operation(struct fscache_operation *);
59859 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
59860 {
59861 INIT_WORK(&op->work, fscache_op_work_func);
59862 atomic_set(&op->usage, 1);
59863 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
59864 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59865 op->processor = processor;
59866 op->release = release;
59867 INIT_LIST_HEAD(&op->pend_link);
59868 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
59869 index 2a53f10..0187fdf 100644
59870 --- a/include/linux/fsnotify.h
59871 +++ b/include/linux/fsnotify.h
59872 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
59873 */
59874 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
59875 {
59876 - return kstrdup(name, GFP_KERNEL);
59877 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
59878 }
59879
59880 /*
59881 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
59882 index 91d0e0a3..035666b 100644
59883 --- a/include/linux/fsnotify_backend.h
59884 +++ b/include/linux/fsnotify_backend.h
59885 @@ -105,6 +105,7 @@ struct fsnotify_ops {
59886 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
59887 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
59888 };
59889 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
59890
59891 /*
59892 * A group is a "thing" that wants to receive notification about filesystem
59893 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
59894 index c3da42d..c70e0df 100644
59895 --- a/include/linux/ftrace_event.h
59896 +++ b/include/linux/ftrace_event.h
59897 @@ -97,7 +97,7 @@ struct trace_event_functions {
59898 trace_print_func raw;
59899 trace_print_func hex;
59900 trace_print_func binary;
59901 -};
59902 +} __no_const;
59903
59904 struct trace_event {
59905 struct hlist_node node;
59906 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
59907 extern int trace_add_event_call(struct ftrace_event_call *call);
59908 extern void trace_remove_event_call(struct ftrace_event_call *call);
59909
59910 -#define is_signed_type(type) (((type)(-1)) < 0)
59911 +#define is_signed_type(type) (((type)(-1)) < (type)1)
59912
59913 int trace_set_clr_event(const char *system, const char *event, int set);
59914
59915 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
59916 index 6d18f35..ab71e2c 100644
59917 --- a/include/linux/genhd.h
59918 +++ b/include/linux/genhd.h
59919 @@ -185,7 +185,7 @@ struct gendisk {
59920 struct kobject *slave_dir;
59921
59922 struct timer_rand_state *random;
59923 - atomic_t sync_io; /* RAID */
59924 + atomic_unchecked_t sync_io; /* RAID */
59925 struct disk_events *ev;
59926 #ifdef CONFIG_BLK_DEV_INTEGRITY
59927 struct blk_integrity *integrity;
59928 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
59929 new file mode 100644
59930 index 0000000..8a130b6
59931 --- /dev/null
59932 +++ b/include/linux/gracl.h
59933 @@ -0,0 +1,319 @@
59934 +#ifndef GR_ACL_H
59935 +#define GR_ACL_H
59936 +
59937 +#include <linux/grdefs.h>
59938 +#include <linux/resource.h>
59939 +#include <linux/capability.h>
59940 +#include <linux/dcache.h>
59941 +#include <asm/resource.h>
59942 +
59943 +/* Major status information */
59944 +
59945 +#define GR_VERSION "grsecurity 2.9"
59946 +#define GRSECURITY_VERSION 0x2900
59947 +
59948 +enum {
59949 + GR_SHUTDOWN = 0,
59950 + GR_ENABLE = 1,
59951 + GR_SPROLE = 2,
59952 + GR_RELOAD = 3,
59953 + GR_SEGVMOD = 4,
59954 + GR_STATUS = 5,
59955 + GR_UNSPROLE = 6,
59956 + GR_PASSSET = 7,
59957 + GR_SPROLEPAM = 8,
59958 +};
59959 +
59960 +/* Password setup definitions
59961 + * kernel/grhash.c */
59962 +enum {
59963 + GR_PW_LEN = 128,
59964 + GR_SALT_LEN = 16,
59965 + GR_SHA_LEN = 32,
59966 +};
59967 +
59968 +enum {
59969 + GR_SPROLE_LEN = 64,
59970 +};
59971 +
59972 +enum {
59973 + GR_NO_GLOB = 0,
59974 + GR_REG_GLOB,
59975 + GR_CREATE_GLOB
59976 +};
59977 +
59978 +#define GR_NLIMITS 32
59979 +
59980 +/* Begin Data Structures */
59981 +
59982 +struct sprole_pw {
59983 + unsigned char *rolename;
59984 + unsigned char salt[GR_SALT_LEN];
59985 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
59986 +};
59987 +
59988 +struct name_entry {
59989 + __u32 key;
59990 + ino_t inode;
59991 + dev_t device;
59992 + char *name;
59993 + __u16 len;
59994 + __u8 deleted;
59995 + struct name_entry *prev;
59996 + struct name_entry *next;
59997 +};
59998 +
59999 +struct inodev_entry {
60000 + struct name_entry *nentry;
60001 + struct inodev_entry *prev;
60002 + struct inodev_entry *next;
60003 +};
60004 +
60005 +struct acl_role_db {
60006 + struct acl_role_label **r_hash;
60007 + __u32 r_size;
60008 +};
60009 +
60010 +struct inodev_db {
60011 + struct inodev_entry **i_hash;
60012 + __u32 i_size;
60013 +};
60014 +
60015 +struct name_db {
60016 + struct name_entry **n_hash;
60017 + __u32 n_size;
60018 +};
60019 +
60020 +struct crash_uid {
60021 + uid_t uid;
60022 + unsigned long expires;
60023 +};
60024 +
60025 +struct gr_hash_struct {
60026 + void **table;
60027 + void **nametable;
60028 + void *first;
60029 + __u32 table_size;
60030 + __u32 used_size;
60031 + int type;
60032 +};
60033 +
60034 +/* Userspace Grsecurity ACL data structures */
60035 +
60036 +struct acl_subject_label {
60037 + char *filename;
60038 + ino_t inode;
60039 + dev_t device;
60040 + __u32 mode;
60041 + kernel_cap_t cap_mask;
60042 + kernel_cap_t cap_lower;
60043 + kernel_cap_t cap_invert_audit;
60044 +
60045 + struct rlimit res[GR_NLIMITS];
60046 + __u32 resmask;
60047 +
60048 + __u8 user_trans_type;
60049 + __u8 group_trans_type;
60050 + uid_t *user_transitions;
60051 + gid_t *group_transitions;
60052 + __u16 user_trans_num;
60053 + __u16 group_trans_num;
60054 +
60055 + __u32 sock_families[2];
60056 + __u32 ip_proto[8];
60057 + __u32 ip_type;
60058 + struct acl_ip_label **ips;
60059 + __u32 ip_num;
60060 + __u32 inaddr_any_override;
60061 +
60062 + __u32 crashes;
60063 + unsigned long expires;
60064 +
60065 + struct acl_subject_label *parent_subject;
60066 + struct gr_hash_struct *hash;
60067 + struct acl_subject_label *prev;
60068 + struct acl_subject_label *next;
60069 +
60070 + struct acl_object_label **obj_hash;
60071 + __u32 obj_hash_size;
60072 + __u16 pax_flags;
60073 +};
60074 +
60075 +struct role_allowed_ip {
60076 + __u32 addr;
60077 + __u32 netmask;
60078 +
60079 + struct role_allowed_ip *prev;
60080 + struct role_allowed_ip *next;
60081 +};
60082 +
60083 +struct role_transition {
60084 + char *rolename;
60085 +
60086 + struct role_transition *prev;
60087 + struct role_transition *next;
60088 +};
60089 +
60090 +struct acl_role_label {
60091 + char *rolename;
60092 + uid_t uidgid;
60093 + __u16 roletype;
60094 +
60095 + __u16 auth_attempts;
60096 + unsigned long expires;
60097 +
60098 + struct acl_subject_label *root_label;
60099 + struct gr_hash_struct *hash;
60100 +
60101 + struct acl_role_label *prev;
60102 + struct acl_role_label *next;
60103 +
60104 + struct role_transition *transitions;
60105 + struct role_allowed_ip *allowed_ips;
60106 + uid_t *domain_children;
60107 + __u16 domain_child_num;
60108 +
60109 + umode_t umask;
60110 +
60111 + struct acl_subject_label **subj_hash;
60112 + __u32 subj_hash_size;
60113 +};
60114 +
60115 +struct user_acl_role_db {
60116 + struct acl_role_label **r_table;
60117 + __u32 num_pointers; /* Number of allocations to track */
60118 + __u32 num_roles; /* Number of roles */
60119 + __u32 num_domain_children; /* Number of domain children */
60120 + __u32 num_subjects; /* Number of subjects */
60121 + __u32 num_objects; /* Number of objects */
60122 +};
60123 +
60124 +struct acl_object_label {
60125 + char *filename;
60126 + ino_t inode;
60127 + dev_t device;
60128 + __u32 mode;
60129 +
60130 + struct acl_subject_label *nested;
60131 + struct acl_object_label *globbed;
60132 +
60133 + /* next two structures not used */
60134 +
60135 + struct acl_object_label *prev;
60136 + struct acl_object_label *next;
60137 +};
60138 +
60139 +struct acl_ip_label {
60140 + char *iface;
60141 + __u32 addr;
60142 + __u32 netmask;
60143 + __u16 low, high;
60144 + __u8 mode;
60145 + __u32 type;
60146 + __u32 proto[8];
60147 +
60148 + /* next two structures not used */
60149 +
60150 + struct acl_ip_label *prev;
60151 + struct acl_ip_label *next;
60152 +};
60153 +
60154 +struct gr_arg {
60155 + struct user_acl_role_db role_db;
60156 + unsigned char pw[GR_PW_LEN];
60157 + unsigned char salt[GR_SALT_LEN];
60158 + unsigned char sum[GR_SHA_LEN];
60159 + unsigned char sp_role[GR_SPROLE_LEN];
60160 + struct sprole_pw *sprole_pws;
60161 + dev_t segv_device;
60162 + ino_t segv_inode;
60163 + uid_t segv_uid;
60164 + __u16 num_sprole_pws;
60165 + __u16 mode;
60166 +};
60167 +
60168 +struct gr_arg_wrapper {
60169 + struct gr_arg *arg;
60170 + __u32 version;
60171 + __u32 size;
60172 +};
60173 +
60174 +struct subject_map {
60175 + struct acl_subject_label *user;
60176 + struct acl_subject_label *kernel;
60177 + struct subject_map *prev;
60178 + struct subject_map *next;
60179 +};
60180 +
60181 +struct acl_subj_map_db {
60182 + struct subject_map **s_hash;
60183 + __u32 s_size;
60184 +};
60185 +
60186 +/* End Data Structures Section */
60187 +
60188 +/* Hash functions generated by empirical testing by Brad Spengler
60189 + Makes good use of the low bits of the inode. Generally 0-1 times
60190 + in loop for successful match. 0-3 for unsuccessful match.
60191 + Shift/add algorithm with modulus of table size and an XOR*/
60192 +
60193 +static __inline__ unsigned int
60194 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60195 +{
60196 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60197 +}
60198 +
60199 + static __inline__ unsigned int
60200 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60201 +{
60202 + return ((const unsigned long)userp % sz);
60203 +}
60204 +
60205 +static __inline__ unsigned int
60206 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60207 +{
60208 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60209 +}
60210 +
60211 +static __inline__ unsigned int
60212 +nhash(const char *name, const __u16 len, const unsigned int sz)
60213 +{
60214 + return full_name_hash((const unsigned char *)name, len) % sz;
60215 +}
60216 +
60217 +#define FOR_EACH_ROLE_START(role) \
60218 + role = role_list; \
60219 + while (role) {
60220 +
60221 +#define FOR_EACH_ROLE_END(role) \
60222 + role = role->prev; \
60223 + }
60224 +
60225 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60226 + subj = NULL; \
60227 + iter = 0; \
60228 + while (iter < role->subj_hash_size) { \
60229 + if (subj == NULL) \
60230 + subj = role->subj_hash[iter]; \
60231 + if (subj == NULL) { \
60232 + iter++; \
60233 + continue; \
60234 + }
60235 +
60236 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60237 + subj = subj->next; \
60238 + if (subj == NULL) \
60239 + iter++; \
60240 + }
60241 +
60242 +
60243 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60244 + subj = role->hash->first; \
60245 + while (subj != NULL) {
60246 +
60247 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60248 + subj = subj->next; \
60249 + }
60250 +
60251 +#endif
60252 +
60253 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60254 new file mode 100644
60255 index 0000000..323ecf2
60256 --- /dev/null
60257 +++ b/include/linux/gralloc.h
60258 @@ -0,0 +1,9 @@
60259 +#ifndef __GRALLOC_H
60260 +#define __GRALLOC_H
60261 +
60262 +void acl_free_all(void);
60263 +int acl_alloc_stack_init(unsigned long size);
60264 +void *acl_alloc(unsigned long len);
60265 +void *acl_alloc_num(unsigned long num, unsigned long len);
60266 +
60267 +#endif
60268 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60269 new file mode 100644
60270 index 0000000..b30e9bc
60271 --- /dev/null
60272 +++ b/include/linux/grdefs.h
60273 @@ -0,0 +1,140 @@
60274 +#ifndef GRDEFS_H
60275 +#define GRDEFS_H
60276 +
60277 +/* Begin grsecurity status declarations */
60278 +
60279 +enum {
60280 + GR_READY = 0x01,
60281 + GR_STATUS_INIT = 0x00 // disabled state
60282 +};
60283 +
60284 +/* Begin ACL declarations */
60285 +
60286 +/* Role flags */
60287 +
60288 +enum {
60289 + GR_ROLE_USER = 0x0001,
60290 + GR_ROLE_GROUP = 0x0002,
60291 + GR_ROLE_DEFAULT = 0x0004,
60292 + GR_ROLE_SPECIAL = 0x0008,
60293 + GR_ROLE_AUTH = 0x0010,
60294 + GR_ROLE_NOPW = 0x0020,
60295 + GR_ROLE_GOD = 0x0040,
60296 + GR_ROLE_LEARN = 0x0080,
60297 + GR_ROLE_TPE = 0x0100,
60298 + GR_ROLE_DOMAIN = 0x0200,
60299 + GR_ROLE_PAM = 0x0400,
60300 + GR_ROLE_PERSIST = 0x0800
60301 +};
60302 +
60303 +/* ACL Subject and Object mode flags */
60304 +enum {
60305 + GR_DELETED = 0x80000000
60306 +};
60307 +
60308 +/* ACL Object-only mode flags */
60309 +enum {
60310 + GR_READ = 0x00000001,
60311 + GR_APPEND = 0x00000002,
60312 + GR_WRITE = 0x00000004,
60313 + GR_EXEC = 0x00000008,
60314 + GR_FIND = 0x00000010,
60315 + GR_INHERIT = 0x00000020,
60316 + GR_SETID = 0x00000040,
60317 + GR_CREATE = 0x00000080,
60318 + GR_DELETE = 0x00000100,
60319 + GR_LINK = 0x00000200,
60320 + GR_AUDIT_READ = 0x00000400,
60321 + GR_AUDIT_APPEND = 0x00000800,
60322 + GR_AUDIT_WRITE = 0x00001000,
60323 + GR_AUDIT_EXEC = 0x00002000,
60324 + GR_AUDIT_FIND = 0x00004000,
60325 + GR_AUDIT_INHERIT= 0x00008000,
60326 + GR_AUDIT_SETID = 0x00010000,
60327 + GR_AUDIT_CREATE = 0x00020000,
60328 + GR_AUDIT_DELETE = 0x00040000,
60329 + GR_AUDIT_LINK = 0x00080000,
60330 + GR_PTRACERD = 0x00100000,
60331 + GR_NOPTRACE = 0x00200000,
60332 + GR_SUPPRESS = 0x00400000,
60333 + GR_NOLEARN = 0x00800000,
60334 + GR_INIT_TRANSFER= 0x01000000
60335 +};
60336 +
60337 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60338 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60339 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60340 +
60341 +/* ACL subject-only mode flags */
60342 +enum {
60343 + GR_KILL = 0x00000001,
60344 + GR_VIEW = 0x00000002,
60345 + GR_PROTECTED = 0x00000004,
60346 + GR_LEARN = 0x00000008,
60347 + GR_OVERRIDE = 0x00000010,
60348 + /* just a placeholder, this mode is only used in userspace */
60349 + GR_DUMMY = 0x00000020,
60350 + GR_PROTSHM = 0x00000040,
60351 + GR_KILLPROC = 0x00000080,
60352 + GR_KILLIPPROC = 0x00000100,
60353 + /* just a placeholder, this mode is only used in userspace */
60354 + GR_NOTROJAN = 0x00000200,
60355 + GR_PROTPROCFD = 0x00000400,
60356 + GR_PROCACCT = 0x00000800,
60357 + GR_RELAXPTRACE = 0x00001000,
60358 + GR_NESTED = 0x00002000,
60359 + GR_INHERITLEARN = 0x00004000,
60360 + GR_PROCFIND = 0x00008000,
60361 + GR_POVERRIDE = 0x00010000,
60362 + GR_KERNELAUTH = 0x00020000,
60363 + GR_ATSECURE = 0x00040000,
60364 + GR_SHMEXEC = 0x00080000
60365 +};
60366 +
60367 +enum {
60368 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60369 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60370 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60371 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60372 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60373 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60374 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60375 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60376 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60377 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60378 +};
60379 +
60380 +enum {
60381 + GR_ID_USER = 0x01,
60382 + GR_ID_GROUP = 0x02,
60383 +};
60384 +
60385 +enum {
60386 + GR_ID_ALLOW = 0x01,
60387 + GR_ID_DENY = 0x02,
60388 +};
60389 +
60390 +#define GR_CRASH_RES 31
60391 +#define GR_UIDTABLE_MAX 500
60392 +
60393 +/* begin resource learning section */
60394 +enum {
60395 + GR_RLIM_CPU_BUMP = 60,
60396 + GR_RLIM_FSIZE_BUMP = 50000,
60397 + GR_RLIM_DATA_BUMP = 10000,
60398 + GR_RLIM_STACK_BUMP = 1000,
60399 + GR_RLIM_CORE_BUMP = 10000,
60400 + GR_RLIM_RSS_BUMP = 500000,
60401 + GR_RLIM_NPROC_BUMP = 1,
60402 + GR_RLIM_NOFILE_BUMP = 5,
60403 + GR_RLIM_MEMLOCK_BUMP = 50000,
60404 + GR_RLIM_AS_BUMP = 500000,
60405 + GR_RLIM_LOCKS_BUMP = 2,
60406 + GR_RLIM_SIGPENDING_BUMP = 5,
60407 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60408 + GR_RLIM_NICE_BUMP = 1,
60409 + GR_RLIM_RTPRIO_BUMP = 1,
60410 + GR_RLIM_RTTIME_BUMP = 1000000
60411 +};
60412 +
60413 +#endif
60414 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60415 new file mode 100644
60416 index 0000000..da390f1
60417 --- /dev/null
60418 +++ b/include/linux/grinternal.h
60419 @@ -0,0 +1,221 @@
60420 +#ifndef __GRINTERNAL_H
60421 +#define __GRINTERNAL_H
60422 +
60423 +#ifdef CONFIG_GRKERNSEC
60424 +
60425 +#include <linux/fs.h>
60426 +#include <linux/mnt_namespace.h>
60427 +#include <linux/nsproxy.h>
60428 +#include <linux/gracl.h>
60429 +#include <linux/grdefs.h>
60430 +#include <linux/grmsg.h>
60431 +
60432 +void gr_add_learn_entry(const char *fmt, ...)
60433 + __attribute__ ((format (printf, 1, 2)));
60434 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60435 + const struct vfsmount *mnt);
60436 +__u32 gr_check_create(const struct dentry *new_dentry,
60437 + const struct dentry *parent,
60438 + const struct vfsmount *mnt, const __u32 mode);
60439 +int gr_check_protected_task(const struct task_struct *task);
60440 +__u32 to_gr_audit(const __u32 reqmode);
60441 +int gr_set_acls(const int type);
60442 +int gr_apply_subject_to_task(struct task_struct *task);
60443 +int gr_acl_is_enabled(void);
60444 +char gr_roletype_to_char(void);
60445 +
60446 +void gr_handle_alertkill(struct task_struct *task);
60447 +char *gr_to_filename(const struct dentry *dentry,
60448 + const struct vfsmount *mnt);
60449 +char *gr_to_filename1(const struct dentry *dentry,
60450 + const struct vfsmount *mnt);
60451 +char *gr_to_filename2(const struct dentry *dentry,
60452 + const struct vfsmount *mnt);
60453 +char *gr_to_filename3(const struct dentry *dentry,
60454 + const struct vfsmount *mnt);
60455 +
60456 +extern int grsec_enable_ptrace_readexec;
60457 +extern int grsec_enable_harden_ptrace;
60458 +extern int grsec_enable_link;
60459 +extern int grsec_enable_fifo;
60460 +extern int grsec_enable_execve;
60461 +extern int grsec_enable_shm;
60462 +extern int grsec_enable_execlog;
60463 +extern int grsec_enable_signal;
60464 +extern int grsec_enable_audit_ptrace;
60465 +extern int grsec_enable_forkfail;
60466 +extern int grsec_enable_time;
60467 +extern int grsec_enable_rofs;
60468 +extern int grsec_enable_chroot_shmat;
60469 +extern int grsec_enable_chroot_mount;
60470 +extern int grsec_enable_chroot_double;
60471 +extern int grsec_enable_chroot_pivot;
60472 +extern int grsec_enable_chroot_chdir;
60473 +extern int grsec_enable_chroot_chmod;
60474 +extern int grsec_enable_chroot_mknod;
60475 +extern int grsec_enable_chroot_fchdir;
60476 +extern int grsec_enable_chroot_nice;
60477 +extern int grsec_enable_chroot_execlog;
60478 +extern int grsec_enable_chroot_caps;
60479 +extern int grsec_enable_chroot_sysctl;
60480 +extern int grsec_enable_chroot_unix;
60481 +extern int grsec_enable_tpe;
60482 +extern int grsec_tpe_gid;
60483 +extern int grsec_enable_tpe_all;
60484 +extern int grsec_enable_tpe_invert;
60485 +extern int grsec_enable_socket_all;
60486 +extern int grsec_socket_all_gid;
60487 +extern int grsec_enable_socket_client;
60488 +extern int grsec_socket_client_gid;
60489 +extern int grsec_enable_socket_server;
60490 +extern int grsec_socket_server_gid;
60491 +extern int grsec_audit_gid;
60492 +extern int grsec_enable_group;
60493 +extern int grsec_enable_audit_textrel;
60494 +extern int grsec_enable_log_rwxmaps;
60495 +extern int grsec_enable_mount;
60496 +extern int grsec_enable_chdir;
60497 +extern int grsec_resource_logging;
60498 +extern int grsec_enable_blackhole;
60499 +extern int grsec_lastack_retries;
60500 +extern int grsec_enable_brute;
60501 +extern int grsec_lock;
60502 +
60503 +extern spinlock_t grsec_alert_lock;
60504 +extern unsigned long grsec_alert_wtime;
60505 +extern unsigned long grsec_alert_fyet;
60506 +
60507 +extern spinlock_t grsec_audit_lock;
60508 +
60509 +extern rwlock_t grsec_exec_file_lock;
60510 +
60511 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60512 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60513 + (tsk)->exec_file->f_vfsmnt) : "/")
60514 +
60515 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60516 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60517 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60518 +
60519 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60520 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
60521 + (tsk)->exec_file->f_vfsmnt) : "/")
60522 +
60523 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60524 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60525 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60526 +
60527 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60528 +
60529 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60530 +
60531 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60532 + (task)->pid, (cred)->uid, \
60533 + (cred)->euid, (cred)->gid, (cred)->egid, \
60534 + gr_parent_task_fullpath(task), \
60535 + (task)->real_parent->comm, (task)->real_parent->pid, \
60536 + (pcred)->uid, (pcred)->euid, \
60537 + (pcred)->gid, (pcred)->egid
60538 +
60539 +#define GR_CHROOT_CAPS {{ \
60540 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60541 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60542 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60543 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60544 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60545 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60546 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60547 +
60548 +#define security_learn(normal_msg,args...) \
60549 +({ \
60550 + read_lock(&grsec_exec_file_lock); \
60551 + gr_add_learn_entry(normal_msg "\n", ## args); \
60552 + read_unlock(&grsec_exec_file_lock); \
60553 +})
60554 +
60555 +enum {
60556 + GR_DO_AUDIT,
60557 + GR_DONT_AUDIT,
60558 + /* used for non-audit messages that we shouldn't kill the task on */
60559 + GR_DONT_AUDIT_GOOD
60560 +};
60561 +
60562 +enum {
60563 + GR_TTYSNIFF,
60564 + GR_RBAC,
60565 + GR_RBAC_STR,
60566 + GR_STR_RBAC,
60567 + GR_RBAC_MODE2,
60568 + GR_RBAC_MODE3,
60569 + GR_FILENAME,
60570 + GR_SYSCTL_HIDDEN,
60571 + GR_NOARGS,
60572 + GR_ONE_INT,
60573 + GR_ONE_INT_TWO_STR,
60574 + GR_ONE_STR,
60575 + GR_STR_INT,
60576 + GR_TWO_STR_INT,
60577 + GR_TWO_INT,
60578 + GR_TWO_U64,
60579 + GR_THREE_INT,
60580 + GR_FIVE_INT_TWO_STR,
60581 + GR_TWO_STR,
60582 + GR_THREE_STR,
60583 + GR_FOUR_STR,
60584 + GR_STR_FILENAME,
60585 + GR_FILENAME_STR,
60586 + GR_FILENAME_TWO_INT,
60587 + GR_FILENAME_TWO_INT_STR,
60588 + GR_TEXTREL,
60589 + GR_PTRACE,
60590 + GR_RESOURCE,
60591 + GR_CAP,
60592 + GR_SIG,
60593 + GR_SIG2,
60594 + GR_CRASH1,
60595 + GR_CRASH2,
60596 + GR_PSACCT,
60597 + GR_RWXMAP
60598 +};
60599 +
60600 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60601 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60602 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60603 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60604 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60605 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60606 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60607 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60608 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60609 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60610 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60611 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60612 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60613 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60614 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60615 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60616 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60617 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60618 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60619 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60620 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60621 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60622 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60623 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60624 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60625 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60626 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60627 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60628 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60629 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60630 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60631 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60632 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60633 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60634 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60635 +
60636 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60637 +
60638 +#endif
60639 +
60640 +#endif
60641 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60642 new file mode 100644
60643 index 0000000..ae576a1
60644 --- /dev/null
60645 +++ b/include/linux/grmsg.h
60646 @@ -0,0 +1,109 @@
60647 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60648 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60649 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60650 +#define GR_STOPMOD_MSG "denied modification of module state by "
60651 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60652 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60653 +#define GR_IOPERM_MSG "denied use of ioperm() by "
60654 +#define GR_IOPL_MSG "denied use of iopl() by "
60655 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60656 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60657 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60658 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60659 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60660 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60661 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60662 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60663 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60664 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60665 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60666 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60667 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60668 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60669 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60670 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60671 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60672 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60673 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60674 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60675 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60676 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60677 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60678 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60679 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60680 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60681 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60682 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60683 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60684 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60685 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60686 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60687 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60688 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60689 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60690 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60691 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60692 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60693 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60694 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60695 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60696 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60697 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60698 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60699 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60700 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60701 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60702 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60703 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60704 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60705 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60706 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60707 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60708 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60709 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60710 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60711 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60712 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60713 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60714 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60715 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60716 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60717 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60718 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
60719 +#define GR_NICE_CHROOT_MSG "denied priority change by "
60720 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60721 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60722 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60723 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60724 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60725 +#define GR_TIME_MSG "time set by "
60726 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60727 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60728 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60729 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60730 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60731 +#define GR_BIND_MSG "denied bind() by "
60732 +#define GR_CONNECT_MSG "denied connect() by "
60733 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60734 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60735 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60736 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60737 +#define GR_CAP_ACL_MSG "use of %s denied for "
60738 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60739 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60740 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60741 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60742 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60743 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60744 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60745 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60746 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60747 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60748 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60749 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60750 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60751 +#define GR_VM86_MSG "denied use of vm86 by "
60752 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60753 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60754 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60755 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60756 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60757 new file mode 100644
60758 index 0000000..2ccf677
60759 --- /dev/null
60760 +++ b/include/linux/grsecurity.h
60761 @@ -0,0 +1,229 @@
60762 +#ifndef GR_SECURITY_H
60763 +#define GR_SECURITY_H
60764 +#include <linux/fs.h>
60765 +#include <linux/fs_struct.h>
60766 +#include <linux/binfmts.h>
60767 +#include <linux/gracl.h>
60768 +
60769 +/* notify of brain-dead configs */
60770 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60771 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60772 +#endif
60773 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
60774 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
60775 +#endif
60776 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
60777 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
60778 +#endif
60779 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
60780 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
60781 +#endif
60782 +
60783 +#include <linux/compat.h>
60784 +
60785 +struct user_arg_ptr {
60786 +#ifdef CONFIG_COMPAT
60787 + bool is_compat;
60788 +#endif
60789 + union {
60790 + const char __user *const __user *native;
60791 +#ifdef CONFIG_COMPAT
60792 + compat_uptr_t __user *compat;
60793 +#endif
60794 + } ptr;
60795 +};
60796 +
60797 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
60798 +void gr_handle_brute_check(void);
60799 +void gr_handle_kernel_exploit(void);
60800 +int gr_process_user_ban(void);
60801 +
60802 +char gr_roletype_to_char(void);
60803 +
60804 +int gr_acl_enable_at_secure(void);
60805 +
60806 +int gr_check_user_change(int real, int effective, int fs);
60807 +int gr_check_group_change(int real, int effective, int fs);
60808 +
60809 +void gr_del_task_from_ip_table(struct task_struct *p);
60810 +
60811 +int gr_pid_is_chrooted(struct task_struct *p);
60812 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
60813 +int gr_handle_chroot_nice(void);
60814 +int gr_handle_chroot_sysctl(const int op);
60815 +int gr_handle_chroot_setpriority(struct task_struct *p,
60816 + const int niceval);
60817 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
60818 +int gr_handle_chroot_chroot(const struct dentry *dentry,
60819 + const struct vfsmount *mnt);
60820 +void gr_handle_chroot_chdir(struct path *path);
60821 +int gr_handle_chroot_chmod(const struct dentry *dentry,
60822 + const struct vfsmount *mnt, const int mode);
60823 +int gr_handle_chroot_mknod(const struct dentry *dentry,
60824 + const struct vfsmount *mnt, const int mode);
60825 +int gr_handle_chroot_mount(const struct dentry *dentry,
60826 + const struct vfsmount *mnt,
60827 + const char *dev_name);
60828 +int gr_handle_chroot_pivot(void);
60829 +int gr_handle_chroot_unix(const pid_t pid);
60830 +
60831 +int gr_handle_rawio(const struct inode *inode);
60832 +
60833 +void gr_handle_ioperm(void);
60834 +void gr_handle_iopl(void);
60835 +
60836 +umode_t gr_acl_umask(void);
60837 +
60838 +int gr_tpe_allow(const struct file *file);
60839 +
60840 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
60841 +void gr_clear_chroot_entries(struct task_struct *task);
60842 +
60843 +void gr_log_forkfail(const int retval);
60844 +void gr_log_timechange(void);
60845 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
60846 +void gr_log_chdir(const struct dentry *dentry,
60847 + const struct vfsmount *mnt);
60848 +void gr_log_chroot_exec(const struct dentry *dentry,
60849 + const struct vfsmount *mnt);
60850 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
60851 +void gr_log_remount(const char *devname, const int retval);
60852 +void gr_log_unmount(const char *devname, const int retval);
60853 +void gr_log_mount(const char *from, const char *to, const int retval);
60854 +void gr_log_textrel(struct vm_area_struct *vma);
60855 +void gr_log_rwxmmap(struct file *file);
60856 +void gr_log_rwxmprotect(struct file *file);
60857 +
60858 +int gr_handle_follow_link(const struct inode *parent,
60859 + const struct inode *inode,
60860 + const struct dentry *dentry,
60861 + const struct vfsmount *mnt);
60862 +int gr_handle_fifo(const struct dentry *dentry,
60863 + const struct vfsmount *mnt,
60864 + const struct dentry *dir, const int flag,
60865 + const int acc_mode);
60866 +int gr_handle_hardlink(const struct dentry *dentry,
60867 + const struct vfsmount *mnt,
60868 + struct inode *inode,
60869 + const int mode, const char *to);
60870 +
60871 +int gr_is_capable(const int cap);
60872 +int gr_is_capable_nolog(const int cap);
60873 +void gr_learn_resource(const struct task_struct *task, const int limit,
60874 + const unsigned long wanted, const int gt);
60875 +void gr_copy_label(struct task_struct *tsk);
60876 +void gr_handle_crash(struct task_struct *task, const int sig);
60877 +int gr_handle_signal(const struct task_struct *p, const int sig);
60878 +int gr_check_crash_uid(const uid_t uid);
60879 +int gr_check_protected_task(const struct task_struct *task);
60880 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
60881 +int gr_acl_handle_mmap(const struct file *file,
60882 + const unsigned long prot);
60883 +int gr_acl_handle_mprotect(const struct file *file,
60884 + const unsigned long prot);
60885 +int gr_check_hidden_task(const struct task_struct *tsk);
60886 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
60887 + const struct vfsmount *mnt);
60888 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
60889 + const struct vfsmount *mnt);
60890 +__u32 gr_acl_handle_access(const struct dentry *dentry,
60891 + const struct vfsmount *mnt, const int fmode);
60892 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
60893 + const struct vfsmount *mnt, umode_t *mode);
60894 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
60895 + const struct vfsmount *mnt);
60896 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
60897 + const struct vfsmount *mnt);
60898 +int gr_handle_ptrace(struct task_struct *task, const long request);
60899 +int gr_handle_proc_ptrace(struct task_struct *task);
60900 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
60901 + const struct vfsmount *mnt);
60902 +int gr_check_crash_exec(const struct file *filp);
60903 +int gr_acl_is_enabled(void);
60904 +void gr_set_kernel_label(struct task_struct *task);
60905 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
60906 + const gid_t gid);
60907 +int gr_set_proc_label(const struct dentry *dentry,
60908 + const struct vfsmount *mnt,
60909 + const int unsafe_flags);
60910 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
60911 + const struct vfsmount *mnt);
60912 +__u32 gr_acl_handle_open(const struct dentry *dentry,
60913 + const struct vfsmount *mnt, int acc_mode);
60914 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
60915 + const struct dentry *p_dentry,
60916 + const struct vfsmount *p_mnt,
60917 + int open_flags, int acc_mode, const int imode);
60918 +void gr_handle_create(const struct dentry *dentry,
60919 + const struct vfsmount *mnt);
60920 +void gr_handle_proc_create(const struct dentry *dentry,
60921 + const struct inode *inode);
60922 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
60923 + const struct dentry *parent_dentry,
60924 + const struct vfsmount *parent_mnt,
60925 + const int mode);
60926 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
60927 + const struct dentry *parent_dentry,
60928 + const struct vfsmount *parent_mnt);
60929 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
60930 + const struct vfsmount *mnt);
60931 +void gr_handle_delete(const ino_t ino, const dev_t dev);
60932 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
60933 + const struct vfsmount *mnt);
60934 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
60935 + const struct dentry *parent_dentry,
60936 + const struct vfsmount *parent_mnt,
60937 + const char *from);
60938 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
60939 + const struct dentry *parent_dentry,
60940 + const struct vfsmount *parent_mnt,
60941 + const struct dentry *old_dentry,
60942 + const struct vfsmount *old_mnt, const char *to);
60943 +int gr_acl_handle_rename(struct dentry *new_dentry,
60944 + struct dentry *parent_dentry,
60945 + const struct vfsmount *parent_mnt,
60946 + struct dentry *old_dentry,
60947 + struct inode *old_parent_inode,
60948 + struct vfsmount *old_mnt, const char *newname);
60949 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
60950 + struct dentry *old_dentry,
60951 + struct dentry *new_dentry,
60952 + struct vfsmount *mnt, const __u8 replace);
60953 +__u32 gr_check_link(const struct dentry *new_dentry,
60954 + const struct dentry *parent_dentry,
60955 + const struct vfsmount *parent_mnt,
60956 + const struct dentry *old_dentry,
60957 + const struct vfsmount *old_mnt);
60958 +int gr_acl_handle_filldir(const struct file *file, const char *name,
60959 + const unsigned int namelen, const ino_t ino);
60960 +
60961 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
60962 + const struct vfsmount *mnt);
60963 +void gr_acl_handle_exit(void);
60964 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
60965 +int gr_acl_handle_procpidmem(const struct task_struct *task);
60966 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
60967 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
60968 +void gr_audit_ptrace(struct task_struct *task);
60969 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
60970 +
60971 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
60972 +
60973 +#ifdef CONFIG_GRKERNSEC
60974 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
60975 +void gr_handle_vm86(void);
60976 +void gr_handle_mem_readwrite(u64 from, u64 to);
60977 +
60978 +void gr_log_badprocpid(const char *entry);
60979 +
60980 +extern int grsec_enable_dmesg;
60981 +extern int grsec_disable_privio;
60982 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
60983 +extern int grsec_enable_chroot_findtask;
60984 +#endif
60985 +#ifdef CONFIG_GRKERNSEC_SETXID
60986 +extern int grsec_enable_setxid;
60987 +#endif
60988 +#endif
60989 +
60990 +#endif
60991 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
60992 new file mode 100644
60993 index 0000000..e7ffaaf
60994 --- /dev/null
60995 +++ b/include/linux/grsock.h
60996 @@ -0,0 +1,19 @@
60997 +#ifndef __GRSOCK_H
60998 +#define __GRSOCK_H
60999 +
61000 +extern void gr_attach_curr_ip(const struct sock *sk);
61001 +extern int gr_handle_sock_all(const int family, const int type,
61002 + const int protocol);
61003 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61004 +extern int gr_handle_sock_server_other(const struct sock *sck);
61005 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61006 +extern int gr_search_connect(struct socket * sock,
61007 + struct sockaddr_in * addr);
61008 +extern int gr_search_bind(struct socket * sock,
61009 + struct sockaddr_in * addr);
61010 +extern int gr_search_listen(struct socket * sock);
61011 +extern int gr_search_accept(struct socket * sock);
61012 +extern int gr_search_socket(const int domain, const int type,
61013 + const int protocol);
61014 +
61015 +#endif
61016 diff --git a/include/linux/hid.h b/include/linux/hid.h
61017 index c235e4e..f0cf7a0 100644
61018 --- a/include/linux/hid.h
61019 +++ b/include/linux/hid.h
61020 @@ -679,7 +679,7 @@ struct hid_ll_driver {
61021 unsigned int code, int value);
61022
61023 int (*parse)(struct hid_device *hdev);
61024 -};
61025 +} __no_const;
61026
61027 #define PM_HINT_FULLON 1<<5
61028 #define PM_HINT_NORMAL 1<<1
61029 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61030 index 3a93f73..b19d0b3 100644
61031 --- a/include/linux/highmem.h
61032 +++ b/include/linux/highmem.h
61033 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
61034 kunmap_atomic(kaddr, KM_USER0);
61035 }
61036
61037 +static inline void sanitize_highpage(struct page *page)
61038 +{
61039 + void *kaddr;
61040 + unsigned long flags;
61041 +
61042 + local_irq_save(flags);
61043 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
61044 + clear_page(kaddr);
61045 + kunmap_atomic(kaddr, KM_CLEARPAGE);
61046 + local_irq_restore(flags);
61047 +}
61048 +
61049 static inline void zero_user_segments(struct page *page,
61050 unsigned start1, unsigned end1,
61051 unsigned start2, unsigned end2)
61052 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61053 index 07d103a..04ec65b 100644
61054 --- a/include/linux/i2c.h
61055 +++ b/include/linux/i2c.h
61056 @@ -364,6 +364,7 @@ struct i2c_algorithm {
61057 /* To determine what the adapter supports */
61058 u32 (*functionality) (struct i2c_adapter *);
61059 };
61060 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61061
61062 /*
61063 * i2c_adapter is the structure used to identify a physical i2c bus along
61064 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61065 index a6deef4..c56a7f2 100644
61066 --- a/include/linux/i2o.h
61067 +++ b/include/linux/i2o.h
61068 @@ -564,7 +564,7 @@ struct i2o_controller {
61069 struct i2o_device *exec; /* Executive */
61070 #if BITS_PER_LONG == 64
61071 spinlock_t context_list_lock; /* lock for context_list */
61072 - atomic_t context_list_counter; /* needed for unique contexts */
61073 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61074 struct list_head context_list; /* list of context id's
61075 and pointers */
61076 #endif
61077 diff --git a/include/linux/init.h b/include/linux/init.h
61078 index 9146f39..885354d 100644
61079 --- a/include/linux/init.h
61080 +++ b/include/linux/init.h
61081 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
61082
61083 /* Each module must use one module_init(). */
61084 #define module_init(initfn) \
61085 - static inline initcall_t __inittest(void) \
61086 + static inline __used initcall_t __inittest(void) \
61087 { return initfn; } \
61088 int init_module(void) __attribute__((alias(#initfn)));
61089
61090 /* This is only required if you want to be unloadable. */
61091 #define module_exit(exitfn) \
61092 - static inline exitcall_t __exittest(void) \
61093 + static inline __used exitcall_t __exittest(void) \
61094 { return exitfn; } \
61095 void cleanup_module(void) __attribute__((alias(#exitfn)));
61096
61097 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61098 index 32574ee..00d4ef1 100644
61099 --- a/include/linux/init_task.h
61100 +++ b/include/linux/init_task.h
61101 @@ -128,6 +128,12 @@ extern struct cred init_cred;
61102
61103 #define INIT_TASK_COMM "swapper"
61104
61105 +#ifdef CONFIG_X86
61106 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61107 +#else
61108 +#define INIT_TASK_THREAD_INFO
61109 +#endif
61110 +
61111 /*
61112 * INIT_TASK is used to set up the first task table, touch at
61113 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61114 @@ -166,6 +172,7 @@ extern struct cred init_cred;
61115 RCU_INIT_POINTER(.cred, &init_cred), \
61116 .comm = INIT_TASK_COMM, \
61117 .thread = INIT_THREAD, \
61118 + INIT_TASK_THREAD_INFO \
61119 .fs = &init_fs, \
61120 .files = &init_files, \
61121 .signal = &init_signals, \
61122 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61123 index e6ca56d..8583707 100644
61124 --- a/include/linux/intel-iommu.h
61125 +++ b/include/linux/intel-iommu.h
61126 @@ -296,7 +296,7 @@ struct iommu_flush {
61127 u8 fm, u64 type);
61128 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61129 unsigned int size_order, u64 type);
61130 -};
61131 +} __no_const;
61132
61133 enum {
61134 SR_DMAR_FECTL_REG,
61135 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61136 index a64b00e..464d8bc 100644
61137 --- a/include/linux/interrupt.h
61138 +++ b/include/linux/interrupt.h
61139 @@ -441,7 +441,7 @@ enum
61140 /* map softirq index to softirq name. update 'softirq_to_name' in
61141 * kernel/softirq.c when adding a new softirq.
61142 */
61143 -extern char *softirq_to_name[NR_SOFTIRQS];
61144 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61145
61146 /* softirq mask and active fields moved to irq_cpustat_t in
61147 * asm/hardirq.h to get better cache usage. KAO
61148 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61149
61150 struct softirq_action
61151 {
61152 - void (*action)(struct softirq_action *);
61153 + void (*action)(void);
61154 };
61155
61156 asmlinkage void do_softirq(void);
61157 asmlinkage void __do_softirq(void);
61158 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61159 +extern void open_softirq(int nr, void (*action)(void));
61160 extern void softirq_init(void);
61161 static inline void __raise_softirq_irqoff(unsigned int nr)
61162 {
61163 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61164 index 3875719..4cd454c 100644
61165 --- a/include/linux/kallsyms.h
61166 +++ b/include/linux/kallsyms.h
61167 @@ -15,7 +15,8 @@
61168
61169 struct module;
61170
61171 -#ifdef CONFIG_KALLSYMS
61172 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61173 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61174 /* Lookup the address for a symbol. Returns 0 if not found. */
61175 unsigned long kallsyms_lookup_name(const char *name);
61176
61177 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61178 /* Stupid that this does nothing, but I didn't create this mess. */
61179 #define __print_symbol(fmt, addr)
61180 #endif /*CONFIG_KALLSYMS*/
61181 +#else /* when included by kallsyms.c, vsnprintf.c, or
61182 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61183 +extern void __print_symbol(const char *fmt, unsigned long address);
61184 +extern int sprint_backtrace(char *buffer, unsigned long address);
61185 +extern int sprint_symbol(char *buffer, unsigned long address);
61186 +const char *kallsyms_lookup(unsigned long addr,
61187 + unsigned long *symbolsize,
61188 + unsigned long *offset,
61189 + char **modname, char *namebuf);
61190 +#endif
61191
61192 /* This macro allows us to keep printk typechecking */
61193 static __printf(1, 2)
61194 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61195 index fa39183..40160be 100644
61196 --- a/include/linux/kgdb.h
61197 +++ b/include/linux/kgdb.h
61198 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61199 extern int kgdb_io_module_registered;
61200
61201 extern atomic_t kgdb_setting_breakpoint;
61202 -extern atomic_t kgdb_cpu_doing_single_step;
61203 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61204
61205 extern struct task_struct *kgdb_usethread;
61206 extern struct task_struct *kgdb_contthread;
61207 @@ -251,7 +251,7 @@ struct kgdb_arch {
61208 void (*disable_hw_break)(struct pt_regs *regs);
61209 void (*remove_all_hw_break)(void);
61210 void (*correct_hw_break)(void);
61211 -};
61212 +} __do_const;
61213
61214 /**
61215 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61216 @@ -276,7 +276,7 @@ struct kgdb_io {
61217 void (*pre_exception) (void);
61218 void (*post_exception) (void);
61219 int is_console;
61220 -};
61221 +} __do_const;
61222
61223 extern struct kgdb_arch arch_kgdb_ops;
61224
61225 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61226 index b16f653..eb908f4 100644
61227 --- a/include/linux/kmod.h
61228 +++ b/include/linux/kmod.h
61229 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61230 * usually useless though. */
61231 extern __printf(2, 3)
61232 int __request_module(bool wait, const char *name, ...);
61233 +extern __printf(3, 4)
61234 +int ___request_module(bool wait, char *param_name, const char *name, ...);
61235 #define request_module(mod...) __request_module(true, mod)
61236 #define request_module_nowait(mod...) __request_module(false, mod)
61237 #define try_then_request_module(x, mod...) \
61238 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61239 index d526231..086e89b 100644
61240 --- a/include/linux/kvm_host.h
61241 +++ b/include/linux/kvm_host.h
61242 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61243 void vcpu_load(struct kvm_vcpu *vcpu);
61244 void vcpu_put(struct kvm_vcpu *vcpu);
61245
61246 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61247 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61248 struct module *module);
61249 void kvm_exit(void);
61250
61251 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61252 struct kvm_guest_debug *dbg);
61253 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61254
61255 -int kvm_arch_init(void *opaque);
61256 +int kvm_arch_init(const void *opaque);
61257 void kvm_arch_exit(void);
61258
61259 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61260 diff --git a/include/linux/libata.h b/include/linux/libata.h
61261 index cafc09a..d7e7829 100644
61262 --- a/include/linux/libata.h
61263 +++ b/include/linux/libata.h
61264 @@ -909,7 +909,7 @@ struct ata_port_operations {
61265 * fields must be pointers.
61266 */
61267 const struct ata_port_operations *inherits;
61268 -};
61269 +} __do_const;
61270
61271 struct ata_port_info {
61272 unsigned long flags;
61273 diff --git a/include/linux/mca.h b/include/linux/mca.h
61274 index 3797270..7765ede 100644
61275 --- a/include/linux/mca.h
61276 +++ b/include/linux/mca.h
61277 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61278 int region);
61279 void * (*mca_transform_memory)(struct mca_device *,
61280 void *memory);
61281 -};
61282 +} __no_const;
61283
61284 struct mca_bus {
61285 u64 default_dma_mask;
61286 diff --git a/include/linux/memory.h b/include/linux/memory.h
61287 index 935699b..11042cc 100644
61288 --- a/include/linux/memory.h
61289 +++ b/include/linux/memory.h
61290 @@ -144,7 +144,7 @@ struct memory_accessor {
61291 size_t count);
61292 ssize_t (*write)(struct memory_accessor *, const char *buf,
61293 off_t offset, size_t count);
61294 -};
61295 +} __no_const;
61296
61297 /*
61298 * Kernel text modification mutex, used for code patching. Users of this lock
61299 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61300 index 9970337..9444122 100644
61301 --- a/include/linux/mfd/abx500.h
61302 +++ b/include/linux/mfd/abx500.h
61303 @@ -188,6 +188,7 @@ struct abx500_ops {
61304 int (*event_registers_startup_state_get) (struct device *, u8 *);
61305 int (*startup_irq_enabled) (struct device *, unsigned int);
61306 };
61307 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61308
61309 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61310 void abx500_remove_ops(struct device *dev);
61311 diff --git a/include/linux/mm.h b/include/linux/mm.h
61312 index 4baadd1..2e0b45e 100644
61313 --- a/include/linux/mm.h
61314 +++ b/include/linux/mm.h
61315 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
61316
61317 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61318 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61319 +
61320 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61321 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61322 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61323 +#else
61324 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61325 +#endif
61326 +
61327 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61328 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61329
61330 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
61331 int set_page_dirty_lock(struct page *page);
61332 int clear_page_dirty_for_io(struct page *page);
61333
61334 -/* Is the vma a continuation of the stack vma above it? */
61335 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61336 -{
61337 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61338 -}
61339 -
61340 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61341 - unsigned long addr)
61342 -{
61343 - return (vma->vm_flags & VM_GROWSDOWN) &&
61344 - (vma->vm_start == addr) &&
61345 - !vma_growsdown(vma->vm_prev, addr);
61346 -}
61347 -
61348 -/* Is the vma a continuation of the stack vma below it? */
61349 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61350 -{
61351 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61352 -}
61353 -
61354 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61355 - unsigned long addr)
61356 -{
61357 - return (vma->vm_flags & VM_GROWSUP) &&
61358 - (vma->vm_end == addr) &&
61359 - !vma_growsup(vma->vm_next, addr);
61360 -}
61361 -
61362 extern unsigned long move_page_tables(struct vm_area_struct *vma,
61363 unsigned long old_addr, struct vm_area_struct *new_vma,
61364 unsigned long new_addr, unsigned long len);
61365 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
61366 }
61367 #endif
61368
61369 +#ifdef CONFIG_MMU
61370 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61371 +#else
61372 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61373 +{
61374 + return __pgprot(0);
61375 +}
61376 +#endif
61377 +
61378 int vma_wants_writenotify(struct vm_area_struct *vma);
61379
61380 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61381 @@ -1419,6 +1407,7 @@ out:
61382 }
61383
61384 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61385 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61386
61387 extern unsigned long do_brk(unsigned long, unsigned long);
61388
61389 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61390 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61391 struct vm_area_struct **pprev);
61392
61393 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61394 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61395 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61396 +
61397 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61398 NULL if none. Assume start_addr < end_addr. */
61399 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61400 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
61401 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
61402 }
61403
61404 -#ifdef CONFIG_MMU
61405 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
61406 -#else
61407 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61408 -{
61409 - return __pgprot(0);
61410 -}
61411 -#endif
61412 -
61413 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61414 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61415 unsigned long pfn, unsigned long size, pgprot_t);
61416 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
61417 extern int sysctl_memory_failure_early_kill;
61418 extern int sysctl_memory_failure_recovery;
61419 extern void shake_page(struct page *p, int access);
61420 -extern atomic_long_t mce_bad_pages;
61421 +extern atomic_long_unchecked_t mce_bad_pages;
61422 extern int soft_offline_page(struct page *page, int flags);
61423
61424 extern void dump_page(struct page *page);
61425 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
61426 unsigned int pages_per_huge_page);
61427 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
61428
61429 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61430 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61431 +#else
61432 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61433 +#endif
61434 +
61435 #endif /* __KERNEL__ */
61436 #endif /* _LINUX_MM_H */
61437 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61438 index 5b42f1b..759e4b4 100644
61439 --- a/include/linux/mm_types.h
61440 +++ b/include/linux/mm_types.h
61441 @@ -253,6 +253,8 @@ struct vm_area_struct {
61442 #ifdef CONFIG_NUMA
61443 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61444 #endif
61445 +
61446 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61447 };
61448
61449 struct core_thread {
61450 @@ -389,6 +391,24 @@ struct mm_struct {
61451 #ifdef CONFIG_CPUMASK_OFFSTACK
61452 struct cpumask cpumask_allocation;
61453 #endif
61454 +
61455 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61456 + unsigned long pax_flags;
61457 +#endif
61458 +
61459 +#ifdef CONFIG_PAX_DLRESOLVE
61460 + unsigned long call_dl_resolve;
61461 +#endif
61462 +
61463 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61464 + unsigned long call_syscall;
61465 +#endif
61466 +
61467 +#ifdef CONFIG_PAX_ASLR
61468 + unsigned long delta_mmap; /* randomized offset */
61469 + unsigned long delta_stack; /* randomized offset */
61470 +#endif
61471 +
61472 };
61473
61474 static inline void mm_init_cpumask(struct mm_struct *mm)
61475 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61476 index 1d1b1e1..2a13c78 100644
61477 --- a/include/linux/mmu_notifier.h
61478 +++ b/include/linux/mmu_notifier.h
61479 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61480 */
61481 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61482 ({ \
61483 - pte_t __pte; \
61484 + pte_t ___pte; \
61485 struct vm_area_struct *___vma = __vma; \
61486 unsigned long ___address = __address; \
61487 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61488 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61489 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61490 - __pte; \
61491 + ___pte; \
61492 })
61493
61494 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61495 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61496 index 188cb2f..d78409b 100644
61497 --- a/include/linux/mmzone.h
61498 +++ b/include/linux/mmzone.h
61499 @@ -369,7 +369,7 @@ struct zone {
61500 unsigned long flags; /* zone flags, see below */
61501
61502 /* Zone statistics */
61503 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61504 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61505
61506 /*
61507 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61508 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61509 index 468819c..17b9db3 100644
61510 --- a/include/linux/mod_devicetable.h
61511 +++ b/include/linux/mod_devicetable.h
61512 @@ -12,7 +12,7 @@
61513 typedef unsigned long kernel_ulong_t;
61514 #endif
61515
61516 -#define PCI_ANY_ID (~0)
61517 +#define PCI_ANY_ID ((__u16)~0)
61518
61519 struct pci_device_id {
61520 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61521 @@ -131,7 +131,7 @@ struct usb_device_id {
61522 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61523 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61524
61525 -#define HID_ANY_ID (~0)
61526 +#define HID_ANY_ID (~0U)
61527
61528 struct hid_device_id {
61529 __u16 bus;
61530 diff --git a/include/linux/module.h b/include/linux/module.h
61531 index 3cb7839..511cb87 100644
61532 --- a/include/linux/module.h
61533 +++ b/include/linux/module.h
61534 @@ -17,6 +17,7 @@
61535 #include <linux/moduleparam.h>
61536 #include <linux/tracepoint.h>
61537 #include <linux/export.h>
61538 +#include <linux/fs.h>
61539
61540 #include <linux/percpu.h>
61541 #include <asm/module.h>
61542 @@ -261,19 +262,16 @@ struct module
61543 int (*init)(void);
61544
61545 /* If this is non-NULL, vfree after init() returns */
61546 - void *module_init;
61547 + void *module_init_rx, *module_init_rw;
61548
61549 /* Here is the actual code + data, vfree'd on unload. */
61550 - void *module_core;
61551 + void *module_core_rx, *module_core_rw;
61552
61553 /* Here are the sizes of the init and core sections */
61554 - unsigned int init_size, core_size;
61555 + unsigned int init_size_rw, core_size_rw;
61556
61557 /* The size of the executable code in each section. */
61558 - unsigned int init_text_size, core_text_size;
61559 -
61560 - /* Size of RO sections of the module (text+rodata) */
61561 - unsigned int init_ro_size, core_ro_size;
61562 + unsigned int init_size_rx, core_size_rx;
61563
61564 /* Arch-specific module values */
61565 struct mod_arch_specific arch;
61566 @@ -329,6 +327,10 @@ struct module
61567 #ifdef CONFIG_EVENT_TRACING
61568 struct ftrace_event_call **trace_events;
61569 unsigned int num_trace_events;
61570 + struct file_operations trace_id;
61571 + struct file_operations trace_enable;
61572 + struct file_operations trace_format;
61573 + struct file_operations trace_filter;
61574 #endif
61575 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61576 unsigned int num_ftrace_callsites;
61577 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
61578 bool is_module_percpu_address(unsigned long addr);
61579 bool is_module_text_address(unsigned long addr);
61580
61581 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61582 +{
61583 +
61584 +#ifdef CONFIG_PAX_KERNEXEC
61585 + if (ktla_ktva(addr) >= (unsigned long)start &&
61586 + ktla_ktva(addr) < (unsigned long)start + size)
61587 + return 1;
61588 +#endif
61589 +
61590 + return ((void *)addr >= start && (void *)addr < start + size);
61591 +}
61592 +
61593 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61594 +{
61595 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61596 +}
61597 +
61598 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61599 +{
61600 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61601 +}
61602 +
61603 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61604 +{
61605 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61606 +}
61607 +
61608 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61609 +{
61610 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61611 +}
61612 +
61613 static inline int within_module_core(unsigned long addr, struct module *mod)
61614 {
61615 - return (unsigned long)mod->module_core <= addr &&
61616 - addr < (unsigned long)mod->module_core + mod->core_size;
61617 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61618 }
61619
61620 static inline int within_module_init(unsigned long addr, struct module *mod)
61621 {
61622 - return (unsigned long)mod->module_init <= addr &&
61623 - addr < (unsigned long)mod->module_init + mod->init_size;
61624 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61625 }
61626
61627 /* Search for module by name: must hold module_mutex. */
61628 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61629 index b2be02e..6a9fdb1 100644
61630 --- a/include/linux/moduleloader.h
61631 +++ b/include/linux/moduleloader.h
61632 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61633 sections. Returns NULL on failure. */
61634 void *module_alloc(unsigned long size);
61635
61636 +#ifdef CONFIG_PAX_KERNEXEC
61637 +void *module_alloc_exec(unsigned long size);
61638 +#else
61639 +#define module_alloc_exec(x) module_alloc(x)
61640 +#endif
61641 +
61642 /* Free memory returned from module_alloc. */
61643 void module_free(struct module *mod, void *module_region);
61644
61645 +#ifdef CONFIG_PAX_KERNEXEC
61646 +void module_free_exec(struct module *mod, void *module_region);
61647 +#else
61648 +#define module_free_exec(x, y) module_free((x), (y))
61649 +#endif
61650 +
61651 /* Apply the given relocation to the (simplified) ELF. Return -error
61652 or 0. */
61653 int apply_relocate(Elf_Shdr *sechdrs,
61654 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
61655 index 7939f63..ec6df57 100644
61656 --- a/include/linux/moduleparam.h
61657 +++ b/include/linux/moduleparam.h
61658 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
61659 * @len is usually just sizeof(string).
61660 */
61661 #define module_param_string(name, string, len, perm) \
61662 - static const struct kparam_string __param_string_##name \
61663 + static const struct kparam_string __param_string_##name __used \
61664 = { len, string }; \
61665 __module_param_call(MODULE_PARAM_PREFIX, name, \
61666 &param_ops_string, \
61667 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
61668 * module_param_named() for why this might be necessary.
61669 */
61670 #define module_param_array_named(name, array, type, nump, perm) \
61671 - static const struct kparam_array __param_arr_##name \
61672 + static const struct kparam_array __param_arr_##name __used \
61673 = { .max = ARRAY_SIZE(array), .num = nump, \
61674 .ops = &param_ops_##type, \
61675 .elemsize = sizeof(array[0]), .elem = array }; \
61676 diff --git a/include/linux/namei.h b/include/linux/namei.h
61677 index ffc0213..2c1f2cb 100644
61678 --- a/include/linux/namei.h
61679 +++ b/include/linux/namei.h
61680 @@ -24,7 +24,7 @@ struct nameidata {
61681 unsigned seq;
61682 int last_type;
61683 unsigned depth;
61684 - char *saved_names[MAX_NESTED_LINKS + 1];
61685 + const char *saved_names[MAX_NESTED_LINKS + 1];
61686
61687 /* Intent data */
61688 union {
61689 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
61690 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
61691 extern void unlock_rename(struct dentry *, struct dentry *);
61692
61693 -static inline void nd_set_link(struct nameidata *nd, char *path)
61694 +static inline void nd_set_link(struct nameidata *nd, const char *path)
61695 {
61696 nd->saved_names[nd->depth] = path;
61697 }
61698
61699 -static inline char *nd_get_link(struct nameidata *nd)
61700 +static inline const char *nd_get_link(const struct nameidata *nd)
61701 {
61702 return nd->saved_names[nd->depth];
61703 }
61704 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
61705 index a82ad4d..90d15b7 100644
61706 --- a/include/linux/netdevice.h
61707 +++ b/include/linux/netdevice.h
61708 @@ -949,6 +949,7 @@ struct net_device_ops {
61709 int (*ndo_set_features)(struct net_device *dev,
61710 u32 features);
61711 };
61712 +typedef struct net_device_ops __no_const net_device_ops_no_const;
61713
61714 /*
61715 * The DEVICE structure.
61716 @@ -1088,7 +1089,7 @@ struct net_device {
61717 int iflink;
61718
61719 struct net_device_stats stats;
61720 - atomic_long_t rx_dropped; /* dropped packets by core network
61721 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
61722 * Do not use this in drivers.
61723 */
61724
61725 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
61726 new file mode 100644
61727 index 0000000..33f4af8
61728 --- /dev/null
61729 +++ b/include/linux/netfilter/xt_gradm.h
61730 @@ -0,0 +1,9 @@
61731 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
61732 +#define _LINUX_NETFILTER_XT_GRADM_H 1
61733 +
61734 +struct xt_gradm_mtinfo {
61735 + __u16 flags;
61736 + __u16 invflags;
61737 +};
61738 +
61739 +#endif
61740 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
61741 index c65a18a..0c05f3a 100644
61742 --- a/include/linux/of_pdt.h
61743 +++ b/include/linux/of_pdt.h
61744 @@ -32,7 +32,7 @@ struct of_pdt_ops {
61745
61746 /* return 0 on success; fill in 'len' with number of bytes in path */
61747 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
61748 -};
61749 +} __no_const;
61750
61751 extern void *prom_early_alloc(unsigned long size);
61752
61753 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
61754 index a4c5624..79d6d88 100644
61755 --- a/include/linux/oprofile.h
61756 +++ b/include/linux/oprofile.h
61757 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
61758 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
61759 char const * name, ulong * val);
61760
61761 -/** Create a file for read-only access to an atomic_t. */
61762 +/** Create a file for read-only access to an atomic_unchecked_t. */
61763 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
61764 - char const * name, atomic_t * val);
61765 + char const * name, atomic_unchecked_t * val);
61766
61767 /** create a directory */
61768 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
61769 diff --git a/include/linux/padata.h b/include/linux/padata.h
61770 index 4633b2f..988bc08 100644
61771 --- a/include/linux/padata.h
61772 +++ b/include/linux/padata.h
61773 @@ -129,7 +129,7 @@ struct parallel_data {
61774 struct padata_instance *pinst;
61775 struct padata_parallel_queue __percpu *pqueue;
61776 struct padata_serial_queue __percpu *squeue;
61777 - atomic_t seq_nr;
61778 + atomic_unchecked_t seq_nr;
61779 atomic_t reorder_objects;
61780 atomic_t refcnt;
61781 unsigned int max_seq_nr;
61782 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
61783 index b1f8912..c955bff 100644
61784 --- a/include/linux/perf_event.h
61785 +++ b/include/linux/perf_event.h
61786 @@ -748,8 +748,8 @@ struct perf_event {
61787
61788 enum perf_event_active_state state;
61789 unsigned int attach_state;
61790 - local64_t count;
61791 - atomic64_t child_count;
61792 + local64_t count; /* PaX: fix it one day */
61793 + atomic64_unchecked_t child_count;
61794
61795 /*
61796 * These are the total time in nanoseconds that the event
61797 @@ -800,8 +800,8 @@ struct perf_event {
61798 * These accumulate total time (in nanoseconds) that children
61799 * events have been enabled and running, respectively.
61800 */
61801 - atomic64_t child_total_time_enabled;
61802 - atomic64_t child_total_time_running;
61803 + atomic64_unchecked_t child_total_time_enabled;
61804 + atomic64_unchecked_t child_total_time_running;
61805
61806 /*
61807 * Protect attach/detach and child_list:
61808 diff --git a/include/linux/personality.h b/include/linux/personality.h
61809 index 8fc7dd1a..c19d89e 100644
61810 --- a/include/linux/personality.h
61811 +++ b/include/linux/personality.h
61812 @@ -44,6 +44,7 @@ enum {
61813 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
61814 ADDR_NO_RANDOMIZE | \
61815 ADDR_COMPAT_LAYOUT | \
61816 + ADDR_LIMIT_3GB | \
61817 MMAP_PAGE_ZERO)
61818
61819 /*
61820 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
61821 index 77257c9..51d473a 100644
61822 --- a/include/linux/pipe_fs_i.h
61823 +++ b/include/linux/pipe_fs_i.h
61824 @@ -46,9 +46,9 @@ struct pipe_buffer {
61825 struct pipe_inode_info {
61826 wait_queue_head_t wait;
61827 unsigned int nrbufs, curbuf, buffers;
61828 - unsigned int readers;
61829 - unsigned int writers;
61830 - unsigned int waiting_writers;
61831 + atomic_t readers;
61832 + atomic_t writers;
61833 + atomic_t waiting_writers;
61834 unsigned int r_counter;
61835 unsigned int w_counter;
61836 struct page *tmp_page;
61837 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
61838 index d3085e7..fd01052 100644
61839 --- a/include/linux/pm_runtime.h
61840 +++ b/include/linux/pm_runtime.h
61841 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
61842
61843 static inline void pm_runtime_mark_last_busy(struct device *dev)
61844 {
61845 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
61846 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
61847 }
61848
61849 #else /* !CONFIG_PM_RUNTIME */
61850 diff --git a/include/linux/poison.h b/include/linux/poison.h
61851 index 79159de..f1233a9 100644
61852 --- a/include/linux/poison.h
61853 +++ b/include/linux/poison.h
61854 @@ -19,8 +19,8 @@
61855 * under normal circumstances, used to verify that nobody uses
61856 * non-initialized list entries.
61857 */
61858 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
61859 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
61860 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
61861 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
61862
61863 /********** include/linux/timer.h **********/
61864 /*
61865 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
61866 index 58969b2..ead129b 100644
61867 --- a/include/linux/preempt.h
61868 +++ b/include/linux/preempt.h
61869 @@ -123,7 +123,7 @@ struct preempt_ops {
61870 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
61871 void (*sched_out)(struct preempt_notifier *notifier,
61872 struct task_struct *next);
61873 -};
61874 +} __no_const;
61875
61876 /**
61877 * preempt_notifier - key for installing preemption notifiers
61878 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
61879 index 643b96c..ef55a9c 100644
61880 --- a/include/linux/proc_fs.h
61881 +++ b/include/linux/proc_fs.h
61882 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
61883 return proc_create_data(name, mode, parent, proc_fops, NULL);
61884 }
61885
61886 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
61887 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
61888 +{
61889 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61890 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
61891 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61892 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
61893 +#else
61894 + return proc_create_data(name, mode, parent, proc_fops, NULL);
61895 +#endif
61896 +}
61897 +
61898 +
61899 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
61900 mode_t mode, struct proc_dir_entry *base,
61901 read_proc_t *read_proc, void * data)
61902 @@ -258,7 +271,7 @@ union proc_op {
61903 int (*proc_show)(struct seq_file *m,
61904 struct pid_namespace *ns, struct pid *pid,
61905 struct task_struct *task);
61906 -};
61907 +} __no_const;
61908
61909 struct ctl_table_header;
61910 struct ctl_table;
61911 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
61912 index 800f113..e9ee2e3 100644
61913 --- a/include/linux/ptrace.h
61914 +++ b/include/linux/ptrace.h
61915 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
61916 extern void exit_ptrace(struct task_struct *tracer);
61917 #define PTRACE_MODE_READ 1
61918 #define PTRACE_MODE_ATTACH 2
61919 -/* Returns 0 on success, -errno on denial. */
61920 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
61921 /* Returns true on success, false on denial. */
61922 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
61923 +/* Returns true on success, false on denial. */
61924 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
61925 +/* Returns true on success, false on denial. */
61926 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
61927
61928 static inline int ptrace_reparented(struct task_struct *child)
61929 {
61930 diff --git a/include/linux/random.h b/include/linux/random.h
61931 index 8f74538..02a1012 100644
61932 --- a/include/linux/random.h
61933 +++ b/include/linux/random.h
61934 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
61935
61936 u32 prandom32(struct rnd_state *);
61937
61938 +static inline unsigned long pax_get_random_long(void)
61939 +{
61940 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
61941 +}
61942 +
61943 /*
61944 * Handle minimum values for seeds
61945 */
61946 static inline u32 __seed(u32 x, u32 m)
61947 {
61948 - return (x < m) ? x + m : x;
61949 + return (x <= m) ? x + m + 1 : x;
61950 }
61951
61952 /**
61953 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
61954 index e0879a7..a12f962 100644
61955 --- a/include/linux/reboot.h
61956 +++ b/include/linux/reboot.h
61957 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
61958 * Architecture-specific implementations of sys_reboot commands.
61959 */
61960
61961 -extern void machine_restart(char *cmd);
61962 -extern void machine_halt(void);
61963 -extern void machine_power_off(void);
61964 +extern void machine_restart(char *cmd) __noreturn;
61965 +extern void machine_halt(void) __noreturn;
61966 +extern void machine_power_off(void) __noreturn;
61967
61968 extern void machine_shutdown(void);
61969 struct pt_regs;
61970 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
61971 */
61972
61973 extern void kernel_restart_prepare(char *cmd);
61974 -extern void kernel_restart(char *cmd);
61975 -extern void kernel_halt(void);
61976 -extern void kernel_power_off(void);
61977 +extern void kernel_restart(char *cmd) __noreturn;
61978 +extern void kernel_halt(void) __noreturn;
61979 +extern void kernel_power_off(void) __noreturn;
61980
61981 extern int C_A_D; /* for sysctl */
61982 void ctrl_alt_del(void);
61983 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
61984 * Emergency restart, callable from an interrupt handler.
61985 */
61986
61987 -extern void emergency_restart(void);
61988 +extern void emergency_restart(void) __noreturn;
61989 #include <asm/emergency-restart.h>
61990
61991 #endif
61992 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
61993 index 96d465f..b084e05 100644
61994 --- a/include/linux/reiserfs_fs.h
61995 +++ b/include/linux/reiserfs_fs.h
61996 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
61997 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
61998
61999 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
62000 -#define get_generation(s) atomic_read (&fs_generation(s))
62001 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
62002 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
62003 #define __fs_changed(gen,s) (gen != get_generation (s))
62004 #define fs_changed(gen,s) \
62005 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
62006 index 52c83b6..18ed7eb 100644
62007 --- a/include/linux/reiserfs_fs_sb.h
62008 +++ b/include/linux/reiserfs_fs_sb.h
62009 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
62010 /* Comment? -Hans */
62011 wait_queue_head_t s_wait;
62012 /* To be obsoleted soon by per buffer seals.. -Hans */
62013 - atomic_t s_generation_counter; // increased by one every time the
62014 + atomic_unchecked_t s_generation_counter; // increased by one every time the
62015 // tree gets re-balanced
62016 unsigned long s_properties; /* File system properties. Currently holds
62017 on-disk FS format */
62018 diff --git a/include/linux/relay.h b/include/linux/relay.h
62019 index 14a86bc..17d0700 100644
62020 --- a/include/linux/relay.h
62021 +++ b/include/linux/relay.h
62022 @@ -159,7 +159,7 @@ struct rchan_callbacks
62023 * The callback should return 0 if successful, negative if not.
62024 */
62025 int (*remove_buf_file)(struct dentry *dentry);
62026 -};
62027 +} __no_const;
62028
62029 /*
62030 * CONFIG_RELAY kernel API, kernel/relay.c
62031 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62032 index c6c6084..5bf1212 100644
62033 --- a/include/linux/rfkill.h
62034 +++ b/include/linux/rfkill.h
62035 @@ -147,6 +147,7 @@ struct rfkill_ops {
62036 void (*query)(struct rfkill *rfkill, void *data);
62037 int (*set_block)(void *data, bool blocked);
62038 };
62039 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62040
62041 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62042 /**
62043 diff --git a/include/linux/rio.h b/include/linux/rio.h
62044 index 4d50611..c6858a2 100644
62045 --- a/include/linux/rio.h
62046 +++ b/include/linux/rio.h
62047 @@ -315,7 +315,7 @@ struct rio_ops {
62048 int mbox, void *buffer, size_t len);
62049 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62050 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62051 -};
62052 +} __no_const;
62053
62054 #define RIO_RESOURCE_MEM 0x00000100
62055 #define RIO_RESOURCE_DOORBELL 0x00000200
62056 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62057 index 2148b12..519b820 100644
62058 --- a/include/linux/rmap.h
62059 +++ b/include/linux/rmap.h
62060 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62061 void anon_vma_init(void); /* create anon_vma_cachep */
62062 int anon_vma_prepare(struct vm_area_struct *);
62063 void unlink_anon_vmas(struct vm_area_struct *);
62064 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62065 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62066 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62067 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62068 void __anon_vma_link(struct vm_area_struct *);
62069
62070 static inline void anon_vma_merge(struct vm_area_struct *vma,
62071 diff --git a/include/linux/sched.h b/include/linux/sched.h
62072 index 1c4f3e9..342eb1f 100644
62073 --- a/include/linux/sched.h
62074 +++ b/include/linux/sched.h
62075 @@ -101,6 +101,7 @@ struct bio_list;
62076 struct fs_struct;
62077 struct perf_event_context;
62078 struct blk_plug;
62079 +struct linux_binprm;
62080
62081 /*
62082 * List of flags we want to share for kernel threads,
62083 @@ -380,10 +381,13 @@ struct user_namespace;
62084 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62085
62086 extern int sysctl_max_map_count;
62087 +extern unsigned long sysctl_heap_stack_gap;
62088
62089 #include <linux/aio.h>
62090
62091 #ifdef CONFIG_MMU
62092 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62093 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62094 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62095 extern unsigned long
62096 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62097 @@ -629,6 +633,17 @@ struct signal_struct {
62098 #ifdef CONFIG_TASKSTATS
62099 struct taskstats *stats;
62100 #endif
62101 +
62102 +#ifdef CONFIG_GRKERNSEC
62103 + u32 curr_ip;
62104 + u32 saved_ip;
62105 + u32 gr_saddr;
62106 + u32 gr_daddr;
62107 + u16 gr_sport;
62108 + u16 gr_dport;
62109 + u8 used_accept:1;
62110 +#endif
62111 +
62112 #ifdef CONFIG_AUDIT
62113 unsigned audit_tty;
62114 struct tty_audit_buf *tty_audit_buf;
62115 @@ -710,6 +725,11 @@ struct user_struct {
62116 struct key *session_keyring; /* UID's default session keyring */
62117 #endif
62118
62119 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62120 + unsigned int banned;
62121 + unsigned long ban_expires;
62122 +#endif
62123 +
62124 /* Hash table maintenance information */
62125 struct hlist_node uidhash_node;
62126 uid_t uid;
62127 @@ -1337,8 +1357,8 @@ struct task_struct {
62128 struct list_head thread_group;
62129
62130 struct completion *vfork_done; /* for vfork() */
62131 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62132 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62133 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62134 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62135
62136 cputime_t utime, stime, utimescaled, stimescaled;
62137 cputime_t gtime;
62138 @@ -1354,13 +1374,6 @@ struct task_struct {
62139 struct task_cputime cputime_expires;
62140 struct list_head cpu_timers[3];
62141
62142 -/* process credentials */
62143 - const struct cred __rcu *real_cred; /* objective and real subjective task
62144 - * credentials (COW) */
62145 - const struct cred __rcu *cred; /* effective (overridable) subjective task
62146 - * credentials (COW) */
62147 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62148 -
62149 char comm[TASK_COMM_LEN]; /* executable name excluding path
62150 - access with [gs]et_task_comm (which lock
62151 it with task_lock())
62152 @@ -1377,8 +1390,16 @@ struct task_struct {
62153 #endif
62154 /* CPU-specific state of this task */
62155 struct thread_struct thread;
62156 +/* thread_info moved to task_struct */
62157 +#ifdef CONFIG_X86
62158 + struct thread_info tinfo;
62159 +#endif
62160 /* filesystem information */
62161 struct fs_struct *fs;
62162 +
62163 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62164 + * credentials (COW) */
62165 +
62166 /* open file information */
62167 struct files_struct *files;
62168 /* namespaces */
62169 @@ -1425,6 +1446,11 @@ struct task_struct {
62170 struct rt_mutex_waiter *pi_blocked_on;
62171 #endif
62172
62173 +/* process credentials */
62174 + const struct cred __rcu *real_cred; /* objective and real subjective task
62175 + * credentials (COW) */
62176 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62177 +
62178 #ifdef CONFIG_DEBUG_MUTEXES
62179 /* mutex deadlock detection */
62180 struct mutex_waiter *blocked_on;
62181 @@ -1540,6 +1566,27 @@ struct task_struct {
62182 unsigned long default_timer_slack_ns;
62183
62184 struct list_head *scm_work_list;
62185 +
62186 +#ifdef CONFIG_GRKERNSEC
62187 + /* grsecurity */
62188 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62189 + u64 exec_id;
62190 +#endif
62191 +#ifdef CONFIG_GRKERNSEC_SETXID
62192 + const struct cred *delayed_cred;
62193 +#endif
62194 + struct dentry *gr_chroot_dentry;
62195 + struct acl_subject_label *acl;
62196 + struct acl_role_label *role;
62197 + struct file *exec_file;
62198 + u16 acl_role_id;
62199 + /* is this the task that authenticated to the special role */
62200 + u8 acl_sp_role;
62201 + u8 is_writable;
62202 + u8 brute;
62203 + u8 gr_is_chrooted;
62204 +#endif
62205 +
62206 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62207 /* Index of current stored address in ret_stack */
62208 int curr_ret_stack;
62209 @@ -1574,6 +1621,51 @@ struct task_struct {
62210 #endif
62211 };
62212
62213 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62214 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62215 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62216 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62217 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62218 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62219 +
62220 +#ifdef CONFIG_PAX_SOFTMODE
62221 +extern int pax_softmode;
62222 +#endif
62223 +
62224 +extern int pax_check_flags(unsigned long *);
62225 +
62226 +/* if tsk != current then task_lock must be held on it */
62227 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62228 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62229 +{
62230 + if (likely(tsk->mm))
62231 + return tsk->mm->pax_flags;
62232 + else
62233 + return 0UL;
62234 +}
62235 +
62236 +/* if tsk != current then task_lock must be held on it */
62237 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62238 +{
62239 + if (likely(tsk->mm)) {
62240 + tsk->mm->pax_flags = flags;
62241 + return 0;
62242 + }
62243 + return -EINVAL;
62244 +}
62245 +#endif
62246 +
62247 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62248 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62249 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62250 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62251 +#endif
62252 +
62253 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62254 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62255 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62256 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62257 +
62258 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62259 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62260
62261 @@ -2081,7 +2173,9 @@ void yield(void);
62262 extern struct exec_domain default_exec_domain;
62263
62264 union thread_union {
62265 +#ifndef CONFIG_X86
62266 struct thread_info thread_info;
62267 +#endif
62268 unsigned long stack[THREAD_SIZE/sizeof(long)];
62269 };
62270
62271 @@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
62272 */
62273
62274 extern struct task_struct *find_task_by_vpid(pid_t nr);
62275 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62276 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62277 struct pid_namespace *ns);
62278
62279 @@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
62280 extern void mmput(struct mm_struct *);
62281 /* Grab a reference to a task's mm, if it is not already going away */
62282 extern struct mm_struct *get_task_mm(struct task_struct *task);
62283 +/*
62284 + * Grab a reference to a task's mm, if it is not already going away
62285 + * and ptrace_may_access with the mode parameter passed to it
62286 + * succeeds.
62287 + */
62288 +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
62289 /* Remove the current tasks stale references to the old mm_struct */
62290 extern void mm_release(struct task_struct *, struct mm_struct *);
62291 /* Allocate a new mm structure and copy contents from tsk->mm */
62292 @@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62293 extern void exit_itimers(struct signal_struct *);
62294 extern void flush_itimer_signals(void);
62295
62296 -extern NORET_TYPE void do_group_exit(int);
62297 +extern __noreturn void do_group_exit(int);
62298
62299 extern void daemonize(const char *, ...);
62300 extern int allow_signal(int);
62301 @@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62302
62303 #endif
62304
62305 -static inline int object_is_on_stack(void *obj)
62306 +static inline int object_starts_on_stack(void *obj)
62307 {
62308 - void *stack = task_stack_page(current);
62309 + const void *stack = task_stack_page(current);
62310
62311 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62312 }
62313
62314 +#ifdef CONFIG_PAX_USERCOPY
62315 +extern int object_is_on_stack(const void *obj, unsigned long len);
62316 +#endif
62317 +
62318 extern void thread_info_cache_init(void);
62319
62320 #ifdef CONFIG_DEBUG_STACK_USAGE
62321 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62322 index 899fbb4..1cb4138 100644
62323 --- a/include/linux/screen_info.h
62324 +++ b/include/linux/screen_info.h
62325 @@ -43,7 +43,8 @@ struct screen_info {
62326 __u16 pages; /* 0x32 */
62327 __u16 vesa_attributes; /* 0x34 */
62328 __u32 capabilities; /* 0x36 */
62329 - __u8 _reserved[6]; /* 0x3a */
62330 + __u16 vesapm_size; /* 0x3a */
62331 + __u8 _reserved[4]; /* 0x3c */
62332 } __attribute__((packed));
62333
62334 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62335 diff --git a/include/linux/security.h b/include/linux/security.h
62336 index e8c619d..e0cbd1c 100644
62337 --- a/include/linux/security.h
62338 +++ b/include/linux/security.h
62339 @@ -37,6 +37,7 @@
62340 #include <linux/xfrm.h>
62341 #include <linux/slab.h>
62342 #include <linux/xattr.h>
62343 +#include <linux/grsecurity.h>
62344 #include <net/flow.h>
62345
62346 /* Maximum number of letters for an LSM name string */
62347 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62348 index 0b69a46..b2ffa4c 100644
62349 --- a/include/linux/seq_file.h
62350 +++ b/include/linux/seq_file.h
62351 @@ -24,6 +24,9 @@ struct seq_file {
62352 struct mutex lock;
62353 const struct seq_operations *op;
62354 int poll_event;
62355 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62356 + u64 exec_id;
62357 +#endif
62358 void *private;
62359 };
62360
62361 @@ -33,6 +36,7 @@ struct seq_operations {
62362 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62363 int (*show) (struct seq_file *m, void *v);
62364 };
62365 +typedef struct seq_operations __no_const seq_operations_no_const;
62366
62367 #define SEQ_SKIP 1
62368
62369 diff --git a/include/linux/shm.h b/include/linux/shm.h
62370 index 92808b8..c28cac4 100644
62371 --- a/include/linux/shm.h
62372 +++ b/include/linux/shm.h
62373 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62374
62375 /* The task created the shm object. NULL if the task is dead. */
62376 struct task_struct *shm_creator;
62377 +#ifdef CONFIG_GRKERNSEC
62378 + time_t shm_createtime;
62379 + pid_t shm_lapid;
62380 +#endif
62381 };
62382
62383 /* shm_mode upper byte flags */
62384 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62385 index fe86488..1563c1c 100644
62386 --- a/include/linux/skbuff.h
62387 +++ b/include/linux/skbuff.h
62388 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62389 */
62390 static inline int skb_queue_empty(const struct sk_buff_head *list)
62391 {
62392 - return list->next == (struct sk_buff *)list;
62393 + return list->next == (const struct sk_buff *)list;
62394 }
62395
62396 /**
62397 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62398 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62399 const struct sk_buff *skb)
62400 {
62401 - return skb->next == (struct sk_buff *)list;
62402 + return skb->next == (const struct sk_buff *)list;
62403 }
62404
62405 /**
62406 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62407 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62408 const struct sk_buff *skb)
62409 {
62410 - return skb->prev == (struct sk_buff *)list;
62411 + return skb->prev == (const struct sk_buff *)list;
62412 }
62413
62414 /**
62415 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62416 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62417 */
62418 #ifndef NET_SKB_PAD
62419 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62420 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62421 #endif
62422
62423 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62424 diff --git a/include/linux/slab.h b/include/linux/slab.h
62425 index 573c809..e84c132 100644
62426 --- a/include/linux/slab.h
62427 +++ b/include/linux/slab.h
62428 @@ -11,12 +11,20 @@
62429
62430 #include <linux/gfp.h>
62431 #include <linux/types.h>
62432 +#include <linux/err.h>
62433
62434 /*
62435 * Flags to pass to kmem_cache_create().
62436 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62437 */
62438 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62439 +
62440 +#ifdef CONFIG_PAX_USERCOPY
62441 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62442 +#else
62443 +#define SLAB_USERCOPY 0x00000000UL
62444 +#endif
62445 +
62446 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62447 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62448 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62449 @@ -87,10 +95,13 @@
62450 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62451 * Both make kfree a no-op.
62452 */
62453 -#define ZERO_SIZE_PTR ((void *)16)
62454 +#define ZERO_SIZE_PTR \
62455 +({ \
62456 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62457 + (void *)(-MAX_ERRNO-1L); \
62458 +})
62459
62460 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62461 - (unsigned long)ZERO_SIZE_PTR)
62462 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62463
62464 /*
62465 * struct kmem_cache related prototypes
62466 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62467 void kfree(const void *);
62468 void kzfree(const void *);
62469 size_t ksize(const void *);
62470 +void check_object_size(const void *ptr, unsigned long n, bool to);
62471
62472 /*
62473 * Allocator specific definitions. These are mainly used to establish optimized
62474 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
62475
62476 void __init kmem_cache_init_late(void);
62477
62478 +#define kmalloc(x, y) \
62479 +({ \
62480 + void *___retval; \
62481 + intoverflow_t ___x = (intoverflow_t)x; \
62482 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
62483 + ___retval = NULL; \
62484 + else \
62485 + ___retval = kmalloc((size_t)___x, (y)); \
62486 + ___retval; \
62487 +})
62488 +
62489 +#define kmalloc_node(x, y, z) \
62490 +({ \
62491 + void *___retval; \
62492 + intoverflow_t ___x = (intoverflow_t)x; \
62493 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
62494 + ___retval = NULL; \
62495 + else \
62496 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
62497 + ___retval; \
62498 +})
62499 +
62500 +#define kzalloc(x, y) \
62501 +({ \
62502 + void *___retval; \
62503 + intoverflow_t ___x = (intoverflow_t)x; \
62504 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
62505 + ___retval = NULL; \
62506 + else \
62507 + ___retval = kzalloc((size_t)___x, (y)); \
62508 + ___retval; \
62509 +})
62510 +
62511 +#define __krealloc(x, y, z) \
62512 +({ \
62513 + void *___retval; \
62514 + intoverflow_t ___y = (intoverflow_t)y; \
62515 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
62516 + ___retval = NULL; \
62517 + else \
62518 + ___retval = __krealloc((x), (size_t)___y, (z)); \
62519 + ___retval; \
62520 +})
62521 +
62522 +#define krealloc(x, y, z) \
62523 +({ \
62524 + void *___retval; \
62525 + intoverflow_t ___y = (intoverflow_t)y; \
62526 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
62527 + ___retval = NULL; \
62528 + else \
62529 + ___retval = krealloc((x), (size_t)___y, (z)); \
62530 + ___retval; \
62531 +})
62532 +
62533 #endif /* _LINUX_SLAB_H */
62534 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62535 index d00e0ba..1b3bf7b 100644
62536 --- a/include/linux/slab_def.h
62537 +++ b/include/linux/slab_def.h
62538 @@ -68,10 +68,10 @@ struct kmem_cache {
62539 unsigned long node_allocs;
62540 unsigned long node_frees;
62541 unsigned long node_overflow;
62542 - atomic_t allochit;
62543 - atomic_t allocmiss;
62544 - atomic_t freehit;
62545 - atomic_t freemiss;
62546 + atomic_unchecked_t allochit;
62547 + atomic_unchecked_t allocmiss;
62548 + atomic_unchecked_t freehit;
62549 + atomic_unchecked_t freemiss;
62550
62551 /*
62552 * If debugging is enabled, then the allocator can add additional
62553 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62554 index a32bcfd..53b71f4 100644
62555 --- a/include/linux/slub_def.h
62556 +++ b/include/linux/slub_def.h
62557 @@ -89,7 +89,7 @@ struct kmem_cache {
62558 struct kmem_cache_order_objects max;
62559 struct kmem_cache_order_objects min;
62560 gfp_t allocflags; /* gfp flags to use on each alloc */
62561 - int refcount; /* Refcount for slab cache destroy */
62562 + atomic_t refcount; /* Refcount for slab cache destroy */
62563 void (*ctor)(void *);
62564 int inuse; /* Offset to metadata */
62565 int align; /* Alignment */
62566 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62567 }
62568
62569 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62570 -void *__kmalloc(size_t size, gfp_t flags);
62571 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
62572
62573 static __always_inline void *
62574 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62575 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62576 index de8832d..0147b46 100644
62577 --- a/include/linux/sonet.h
62578 +++ b/include/linux/sonet.h
62579 @@ -61,7 +61,7 @@ struct sonet_stats {
62580 #include <linux/atomic.h>
62581
62582 struct k_sonet_stats {
62583 -#define __HANDLE_ITEM(i) atomic_t i
62584 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
62585 __SONET_ITEMS
62586 #undef __HANDLE_ITEM
62587 };
62588 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62589 index 3d8f9c4..69f1c0a 100644
62590 --- a/include/linux/sunrpc/clnt.h
62591 +++ b/include/linux/sunrpc/clnt.h
62592 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62593 {
62594 switch (sap->sa_family) {
62595 case AF_INET:
62596 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
62597 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62598 case AF_INET6:
62599 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62600 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62601 }
62602 return 0;
62603 }
62604 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62605 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62606 const struct sockaddr *src)
62607 {
62608 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62609 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62610 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62611
62612 dsin->sin_family = ssin->sin_family;
62613 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62614 if (sa->sa_family != AF_INET6)
62615 return 0;
62616
62617 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62618 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62619 }
62620
62621 #endif /* __KERNEL__ */
62622 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62623 index e775689..9e206d9 100644
62624 --- a/include/linux/sunrpc/sched.h
62625 +++ b/include/linux/sunrpc/sched.h
62626 @@ -105,6 +105,7 @@ struct rpc_call_ops {
62627 void (*rpc_call_done)(struct rpc_task *, void *);
62628 void (*rpc_release)(void *);
62629 };
62630 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62631
62632 struct rpc_task_setup {
62633 struct rpc_task *task;
62634 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62635 index c14fe86..393245e 100644
62636 --- a/include/linux/sunrpc/svc_rdma.h
62637 +++ b/include/linux/sunrpc/svc_rdma.h
62638 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62639 extern unsigned int svcrdma_max_requests;
62640 extern unsigned int svcrdma_max_req_size;
62641
62642 -extern atomic_t rdma_stat_recv;
62643 -extern atomic_t rdma_stat_read;
62644 -extern atomic_t rdma_stat_write;
62645 -extern atomic_t rdma_stat_sq_starve;
62646 -extern atomic_t rdma_stat_rq_starve;
62647 -extern atomic_t rdma_stat_rq_poll;
62648 -extern atomic_t rdma_stat_rq_prod;
62649 -extern atomic_t rdma_stat_sq_poll;
62650 -extern atomic_t rdma_stat_sq_prod;
62651 +extern atomic_unchecked_t rdma_stat_recv;
62652 +extern atomic_unchecked_t rdma_stat_read;
62653 +extern atomic_unchecked_t rdma_stat_write;
62654 +extern atomic_unchecked_t rdma_stat_sq_starve;
62655 +extern atomic_unchecked_t rdma_stat_rq_starve;
62656 +extern atomic_unchecked_t rdma_stat_rq_poll;
62657 +extern atomic_unchecked_t rdma_stat_rq_prod;
62658 +extern atomic_unchecked_t rdma_stat_sq_poll;
62659 +extern atomic_unchecked_t rdma_stat_sq_prod;
62660
62661 #define RPCRDMA_VERSION 1
62662
62663 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62664 index 703cfa33..0b8ca72ac 100644
62665 --- a/include/linux/sysctl.h
62666 +++ b/include/linux/sysctl.h
62667 @@ -155,7 +155,11 @@ enum
62668 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62669 };
62670
62671 -
62672 +#ifdef CONFIG_PAX_SOFTMODE
62673 +enum {
62674 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
62675 +};
62676 +#endif
62677
62678 /* CTL_VM names: */
62679 enum
62680 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
62681
62682 extern int proc_dostring(struct ctl_table *, int,
62683 void __user *, size_t *, loff_t *);
62684 +extern int proc_dostring_modpriv(struct ctl_table *, int,
62685 + void __user *, size_t *, loff_t *);
62686 extern int proc_dointvec(struct ctl_table *, int,
62687 void __user *, size_t *, loff_t *);
62688 extern int proc_dointvec_minmax(struct ctl_table *, int,
62689 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
62690 index a71a292..51bd91d 100644
62691 --- a/include/linux/tracehook.h
62692 +++ b/include/linux/tracehook.h
62693 @@ -54,12 +54,12 @@ struct linux_binprm;
62694 /*
62695 * ptrace report for syscall entry and exit looks identical.
62696 */
62697 -static inline void ptrace_report_syscall(struct pt_regs *regs)
62698 +static inline int ptrace_report_syscall(struct pt_regs *regs)
62699 {
62700 int ptrace = current->ptrace;
62701
62702 if (!(ptrace & PT_PTRACED))
62703 - return;
62704 + return 0;
62705
62706 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
62707
62708 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62709 send_sig(current->exit_code, current, 1);
62710 current->exit_code = 0;
62711 }
62712 +
62713 + return fatal_signal_pending(current);
62714 }
62715
62716 /**
62717 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62718 static inline __must_check int tracehook_report_syscall_entry(
62719 struct pt_regs *regs)
62720 {
62721 - ptrace_report_syscall(regs);
62722 - return 0;
62723 + return ptrace_report_syscall(regs);
62724 }
62725
62726 /**
62727 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
62728 index ff7dc08..893e1bd 100644
62729 --- a/include/linux/tty_ldisc.h
62730 +++ b/include/linux/tty_ldisc.h
62731 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
62732
62733 struct module *owner;
62734
62735 - int refcount;
62736 + atomic_t refcount;
62737 };
62738
62739 struct tty_ldisc {
62740 diff --git a/include/linux/types.h b/include/linux/types.h
62741 index 57a9723..dbe234a 100644
62742 --- a/include/linux/types.h
62743 +++ b/include/linux/types.h
62744 @@ -213,10 +213,26 @@ typedef struct {
62745 int counter;
62746 } atomic_t;
62747
62748 +#ifdef CONFIG_PAX_REFCOUNT
62749 +typedef struct {
62750 + int counter;
62751 +} atomic_unchecked_t;
62752 +#else
62753 +typedef atomic_t atomic_unchecked_t;
62754 +#endif
62755 +
62756 #ifdef CONFIG_64BIT
62757 typedef struct {
62758 long counter;
62759 } atomic64_t;
62760 +
62761 +#ifdef CONFIG_PAX_REFCOUNT
62762 +typedef struct {
62763 + long counter;
62764 +} atomic64_unchecked_t;
62765 +#else
62766 +typedef atomic64_t atomic64_unchecked_t;
62767 +#endif
62768 #endif
62769
62770 struct list_head {
62771 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
62772 index 5ca0951..ab496a5 100644
62773 --- a/include/linux/uaccess.h
62774 +++ b/include/linux/uaccess.h
62775 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
62776 long ret; \
62777 mm_segment_t old_fs = get_fs(); \
62778 \
62779 - set_fs(KERNEL_DS); \
62780 pagefault_disable(); \
62781 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
62782 - pagefault_enable(); \
62783 + set_fs(KERNEL_DS); \
62784 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
62785 set_fs(old_fs); \
62786 + pagefault_enable(); \
62787 ret; \
62788 })
62789
62790 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
62791 index 99c1b4d..bb94261 100644
62792 --- a/include/linux/unaligned/access_ok.h
62793 +++ b/include/linux/unaligned/access_ok.h
62794 @@ -6,32 +6,32 @@
62795
62796 static inline u16 get_unaligned_le16(const void *p)
62797 {
62798 - return le16_to_cpup((__le16 *)p);
62799 + return le16_to_cpup((const __le16 *)p);
62800 }
62801
62802 static inline u32 get_unaligned_le32(const void *p)
62803 {
62804 - return le32_to_cpup((__le32 *)p);
62805 + return le32_to_cpup((const __le32 *)p);
62806 }
62807
62808 static inline u64 get_unaligned_le64(const void *p)
62809 {
62810 - return le64_to_cpup((__le64 *)p);
62811 + return le64_to_cpup((const __le64 *)p);
62812 }
62813
62814 static inline u16 get_unaligned_be16(const void *p)
62815 {
62816 - return be16_to_cpup((__be16 *)p);
62817 + return be16_to_cpup((const __be16 *)p);
62818 }
62819
62820 static inline u32 get_unaligned_be32(const void *p)
62821 {
62822 - return be32_to_cpup((__be32 *)p);
62823 + return be32_to_cpup((const __be32 *)p);
62824 }
62825
62826 static inline u64 get_unaligned_be64(const void *p)
62827 {
62828 - return be64_to_cpup((__be64 *)p);
62829 + return be64_to_cpup((const __be64 *)p);
62830 }
62831
62832 static inline void put_unaligned_le16(u16 val, void *p)
62833 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
62834 index e5a40c3..20ab0f6 100644
62835 --- a/include/linux/usb/renesas_usbhs.h
62836 +++ b/include/linux/usb/renesas_usbhs.h
62837 @@ -39,7 +39,7 @@ enum {
62838 */
62839 struct renesas_usbhs_driver_callback {
62840 int (*notify_hotplug)(struct platform_device *pdev);
62841 -};
62842 +} __no_const;
62843
62844 /*
62845 * callback functions for platform
62846 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
62847 * VBUS control is needed for Host
62848 */
62849 int (*set_vbus)(struct platform_device *pdev, int enable);
62850 -};
62851 +} __no_const;
62852
62853 /*
62854 * parameters for renesas usbhs
62855 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
62856 index 6f8fbcf..8259001 100644
62857 --- a/include/linux/vermagic.h
62858 +++ b/include/linux/vermagic.h
62859 @@ -25,9 +25,35 @@
62860 #define MODULE_ARCH_VERMAGIC ""
62861 #endif
62862
62863 +#ifdef CONFIG_PAX_REFCOUNT
62864 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
62865 +#else
62866 +#define MODULE_PAX_REFCOUNT ""
62867 +#endif
62868 +
62869 +#ifdef CONSTIFY_PLUGIN
62870 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
62871 +#else
62872 +#define MODULE_CONSTIFY_PLUGIN ""
62873 +#endif
62874 +
62875 +#ifdef STACKLEAK_PLUGIN
62876 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
62877 +#else
62878 +#define MODULE_STACKLEAK_PLUGIN ""
62879 +#endif
62880 +
62881 +#ifdef CONFIG_GRKERNSEC
62882 +#define MODULE_GRSEC "GRSEC "
62883 +#else
62884 +#define MODULE_GRSEC ""
62885 +#endif
62886 +
62887 #define VERMAGIC_STRING \
62888 UTS_RELEASE " " \
62889 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
62890 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
62891 - MODULE_ARCH_VERMAGIC
62892 + MODULE_ARCH_VERMAGIC \
62893 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
62894 + MODULE_GRSEC
62895
62896 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
62897 index 4bde182..aec92c1 100644
62898 --- a/include/linux/vmalloc.h
62899 +++ b/include/linux/vmalloc.h
62900 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
62901 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
62902 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
62903 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
62904 +
62905 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
62906 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
62907 +#endif
62908 +
62909 /* bits [20..32] reserved for arch specific ioremap internals */
62910
62911 /*
62912 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
62913 # endif
62914 #endif
62915
62916 +#define vmalloc(x) \
62917 +({ \
62918 + void *___retval; \
62919 + intoverflow_t ___x = (intoverflow_t)x; \
62920 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
62921 + ___retval = NULL; \
62922 + else \
62923 + ___retval = vmalloc((unsigned long)___x); \
62924 + ___retval; \
62925 +})
62926 +
62927 +#define vzalloc(x) \
62928 +({ \
62929 + void *___retval; \
62930 + intoverflow_t ___x = (intoverflow_t)x; \
62931 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
62932 + ___retval = NULL; \
62933 + else \
62934 + ___retval = vzalloc((unsigned long)___x); \
62935 + ___retval; \
62936 +})
62937 +
62938 +#define __vmalloc(x, y, z) \
62939 +({ \
62940 + void *___retval; \
62941 + intoverflow_t ___x = (intoverflow_t)x; \
62942 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
62943 + ___retval = NULL; \
62944 + else \
62945 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
62946 + ___retval; \
62947 +})
62948 +
62949 +#define vmalloc_user(x) \
62950 +({ \
62951 + void *___retval; \
62952 + intoverflow_t ___x = (intoverflow_t)x; \
62953 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
62954 + ___retval = NULL; \
62955 + else \
62956 + ___retval = vmalloc_user((unsigned long)___x); \
62957 + ___retval; \
62958 +})
62959 +
62960 +#define vmalloc_exec(x) \
62961 +({ \
62962 + void *___retval; \
62963 + intoverflow_t ___x = (intoverflow_t)x; \
62964 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
62965 + ___retval = NULL; \
62966 + else \
62967 + ___retval = vmalloc_exec((unsigned long)___x); \
62968 + ___retval; \
62969 +})
62970 +
62971 +#define vmalloc_node(x, y) \
62972 +({ \
62973 + void *___retval; \
62974 + intoverflow_t ___x = (intoverflow_t)x; \
62975 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
62976 + ___retval = NULL; \
62977 + else \
62978 + ___retval = vmalloc_node((unsigned long)___x, (y));\
62979 + ___retval; \
62980 +})
62981 +
62982 +#define vzalloc_node(x, y) \
62983 +({ \
62984 + void *___retval; \
62985 + intoverflow_t ___x = (intoverflow_t)x; \
62986 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
62987 + ___retval = NULL; \
62988 + else \
62989 + ___retval = vzalloc_node((unsigned long)___x, (y));\
62990 + ___retval; \
62991 +})
62992 +
62993 +#define vmalloc_32(x) \
62994 +({ \
62995 + void *___retval; \
62996 + intoverflow_t ___x = (intoverflow_t)x; \
62997 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
62998 + ___retval = NULL; \
62999 + else \
63000 + ___retval = vmalloc_32((unsigned long)___x); \
63001 + ___retval; \
63002 +})
63003 +
63004 +#define vmalloc_32_user(x) \
63005 +({ \
63006 +void *___retval; \
63007 + intoverflow_t ___x = (intoverflow_t)x; \
63008 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
63009 + ___retval = NULL; \
63010 + else \
63011 + ___retval = vmalloc_32_user((unsigned long)___x);\
63012 + ___retval; \
63013 +})
63014 +
63015 #endif /* _LINUX_VMALLOC_H */
63016 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63017 index 65efb92..137adbb 100644
63018 --- a/include/linux/vmstat.h
63019 +++ b/include/linux/vmstat.h
63020 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63021 /*
63022 * Zone based page accounting with per cpu differentials.
63023 */
63024 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63025 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63026
63027 static inline void zone_page_state_add(long x, struct zone *zone,
63028 enum zone_stat_item item)
63029 {
63030 - atomic_long_add(x, &zone->vm_stat[item]);
63031 - atomic_long_add(x, &vm_stat[item]);
63032 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63033 + atomic_long_add_unchecked(x, &vm_stat[item]);
63034 }
63035
63036 static inline unsigned long global_page_state(enum zone_stat_item item)
63037 {
63038 - long x = atomic_long_read(&vm_stat[item]);
63039 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63040 #ifdef CONFIG_SMP
63041 if (x < 0)
63042 x = 0;
63043 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63044 static inline unsigned long zone_page_state(struct zone *zone,
63045 enum zone_stat_item item)
63046 {
63047 - long x = atomic_long_read(&zone->vm_stat[item]);
63048 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63049 #ifdef CONFIG_SMP
63050 if (x < 0)
63051 x = 0;
63052 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63053 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63054 enum zone_stat_item item)
63055 {
63056 - long x = atomic_long_read(&zone->vm_stat[item]);
63057 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63058
63059 #ifdef CONFIG_SMP
63060 int cpu;
63061 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63062
63063 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63064 {
63065 - atomic_long_inc(&zone->vm_stat[item]);
63066 - atomic_long_inc(&vm_stat[item]);
63067 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63068 + atomic_long_inc_unchecked(&vm_stat[item]);
63069 }
63070
63071 static inline void __inc_zone_page_state(struct page *page,
63072 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63073
63074 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63075 {
63076 - atomic_long_dec(&zone->vm_stat[item]);
63077 - atomic_long_dec(&vm_stat[item]);
63078 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63079 + atomic_long_dec_unchecked(&vm_stat[item]);
63080 }
63081
63082 static inline void __dec_zone_page_state(struct page *page,
63083 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63084 index e5d1220..ef6e406 100644
63085 --- a/include/linux/xattr.h
63086 +++ b/include/linux/xattr.h
63087 @@ -57,6 +57,11 @@
63088 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63089 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63090
63091 +/* User namespace */
63092 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63093 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63094 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63095 +
63096 #ifdef __KERNEL__
63097
63098 #include <linux/types.h>
63099 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63100 index 4aeff96..b378cdc 100644
63101 --- a/include/media/saa7146_vv.h
63102 +++ b/include/media/saa7146_vv.h
63103 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63104 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63105
63106 /* the extension can override this */
63107 - struct v4l2_ioctl_ops ops;
63108 + v4l2_ioctl_ops_no_const ops;
63109 /* pointer to the saa7146 core ops */
63110 const struct v4l2_ioctl_ops *core_ops;
63111
63112 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63113 index c7c40f1..4f01585 100644
63114 --- a/include/media/v4l2-dev.h
63115 +++ b/include/media/v4l2-dev.h
63116 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63117
63118
63119 struct v4l2_file_operations {
63120 - struct module *owner;
63121 + struct module * const owner;
63122 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63123 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63124 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63125 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
63126 int (*open) (struct file *);
63127 int (*release) (struct file *);
63128 };
63129 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63130
63131 /*
63132 * Newer version of video_device, handled by videodev2.c
63133 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63134 index 4d1c74a..65e1221 100644
63135 --- a/include/media/v4l2-ioctl.h
63136 +++ b/include/media/v4l2-ioctl.h
63137 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
63138 long (*vidioc_default) (struct file *file, void *fh,
63139 bool valid_prio, int cmd, void *arg);
63140 };
63141 -
63142 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63143
63144 /* v4l debugging and diagnostics */
63145
63146 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63147 index 8d55251..dfe5b0a 100644
63148 --- a/include/net/caif/caif_hsi.h
63149 +++ b/include/net/caif/caif_hsi.h
63150 @@ -98,7 +98,7 @@ struct cfhsi_drv {
63151 void (*rx_done_cb) (struct cfhsi_drv *drv);
63152 void (*wake_up_cb) (struct cfhsi_drv *drv);
63153 void (*wake_down_cb) (struct cfhsi_drv *drv);
63154 -};
63155 +} __no_const;
63156
63157 /* Structure implemented by HSI device. */
63158 struct cfhsi_dev {
63159 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63160 index 9e5425b..8136ffc 100644
63161 --- a/include/net/caif/cfctrl.h
63162 +++ b/include/net/caif/cfctrl.h
63163 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63164 void (*radioset_rsp)(void);
63165 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63166 struct cflayer *client_layer);
63167 -};
63168 +} __no_const;
63169
63170 /* Link Setup Parameters for CAIF-Links. */
63171 struct cfctrl_link_param {
63172 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63173 struct cfctrl {
63174 struct cfsrvl serv;
63175 struct cfctrl_rsp res;
63176 - atomic_t req_seq_no;
63177 - atomic_t rsp_seq_no;
63178 + atomic_unchecked_t req_seq_no;
63179 + atomic_unchecked_t rsp_seq_no;
63180 struct list_head list;
63181 /* Protects from simultaneous access to first_req list */
63182 spinlock_t info_list_lock;
63183 diff --git a/include/net/flow.h b/include/net/flow.h
63184 index 2a7eefd..3250f3b 100644
63185 --- a/include/net/flow.h
63186 +++ b/include/net/flow.h
63187 @@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63188
63189 extern void flow_cache_flush(void);
63190 extern void flow_cache_flush_deferred(void);
63191 -extern atomic_t flow_cache_genid;
63192 +extern atomic_unchecked_t flow_cache_genid;
63193
63194 #endif
63195 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63196 index e9ff3fc..9d3e5c7 100644
63197 --- a/include/net/inetpeer.h
63198 +++ b/include/net/inetpeer.h
63199 @@ -48,8 +48,8 @@ struct inet_peer {
63200 */
63201 union {
63202 struct {
63203 - atomic_t rid; /* Frag reception counter */
63204 - atomic_t ip_id_count; /* IP ID for the next packet */
63205 + atomic_unchecked_t rid; /* Frag reception counter */
63206 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63207 __u32 tcp_ts;
63208 __u32 tcp_ts_stamp;
63209 };
63210 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63211 more++;
63212 inet_peer_refcheck(p);
63213 do {
63214 - old = atomic_read(&p->ip_id_count);
63215 + old = atomic_read_unchecked(&p->ip_id_count);
63216 new = old + more;
63217 if (!new)
63218 new = 1;
63219 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63220 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63221 return new;
63222 }
63223
63224 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63225 index 10422ef..662570f 100644
63226 --- a/include/net/ip_fib.h
63227 +++ b/include/net/ip_fib.h
63228 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63229
63230 #define FIB_RES_SADDR(net, res) \
63231 ((FIB_RES_NH(res).nh_saddr_genid == \
63232 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63233 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63234 FIB_RES_NH(res).nh_saddr : \
63235 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63236 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63237 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63238 index e5a7b9a..f4fc44b 100644
63239 --- a/include/net/ip_vs.h
63240 +++ b/include/net/ip_vs.h
63241 @@ -509,7 +509,7 @@ struct ip_vs_conn {
63242 struct ip_vs_conn *control; /* Master control connection */
63243 atomic_t n_control; /* Number of controlled ones */
63244 struct ip_vs_dest *dest; /* real server */
63245 - atomic_t in_pkts; /* incoming packet counter */
63246 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63247
63248 /* packet transmitter for different forwarding methods. If it
63249 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63250 @@ -647,7 +647,7 @@ struct ip_vs_dest {
63251 __be16 port; /* port number of the server */
63252 union nf_inet_addr addr; /* IP address of the server */
63253 volatile unsigned flags; /* dest status flags */
63254 - atomic_t conn_flags; /* flags to copy to conn */
63255 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63256 atomic_t weight; /* server weight */
63257
63258 atomic_t refcnt; /* reference counter */
63259 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63260 index 69b610a..fe3962c 100644
63261 --- a/include/net/irda/ircomm_core.h
63262 +++ b/include/net/irda/ircomm_core.h
63263 @@ -51,7 +51,7 @@ typedef struct {
63264 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63265 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63266 struct ircomm_info *);
63267 -} call_t;
63268 +} __no_const call_t;
63269
63270 struct ircomm_cb {
63271 irda_queue_t queue;
63272 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63273 index 59ba38bc..d515662 100644
63274 --- a/include/net/irda/ircomm_tty.h
63275 +++ b/include/net/irda/ircomm_tty.h
63276 @@ -35,6 +35,7 @@
63277 #include <linux/termios.h>
63278 #include <linux/timer.h>
63279 #include <linux/tty.h> /* struct tty_struct */
63280 +#include <asm/local.h>
63281
63282 #include <net/irda/irias_object.h>
63283 #include <net/irda/ircomm_core.h>
63284 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63285 unsigned short close_delay;
63286 unsigned short closing_wait; /* time to wait before closing */
63287
63288 - int open_count;
63289 - int blocked_open; /* # of blocked opens */
63290 + local_t open_count;
63291 + local_t blocked_open; /* # of blocked opens */
63292
63293 /* Protect concurent access to :
63294 * o self->open_count
63295 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63296 index f2419cf..473679f 100644
63297 --- a/include/net/iucv/af_iucv.h
63298 +++ b/include/net/iucv/af_iucv.h
63299 @@ -139,7 +139,7 @@ struct iucv_sock {
63300 struct iucv_sock_list {
63301 struct hlist_head head;
63302 rwlock_t lock;
63303 - atomic_t autobind_name;
63304 + atomic_unchecked_t autobind_name;
63305 };
63306
63307 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63308 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63309 index 2720884..3aa5c25 100644
63310 --- a/include/net/neighbour.h
63311 +++ b/include/net/neighbour.h
63312 @@ -122,7 +122,7 @@ struct neigh_ops {
63313 void (*error_report)(struct neighbour *, struct sk_buff *);
63314 int (*output)(struct neighbour *, struct sk_buff *);
63315 int (*connected_output)(struct neighbour *, struct sk_buff *);
63316 -};
63317 +} __do_const;
63318
63319 struct pneigh_entry {
63320 struct pneigh_entry *next;
63321 diff --git a/include/net/netlink.h b/include/net/netlink.h
63322 index cb1f350..3279d2c 100644
63323 --- a/include/net/netlink.h
63324 +++ b/include/net/netlink.h
63325 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63326 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63327 {
63328 if (mark)
63329 - skb_trim(skb, (unsigned char *) mark - skb->data);
63330 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63331 }
63332
63333 /**
63334 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63335 index d786b4f..4c3dd41 100644
63336 --- a/include/net/netns/ipv4.h
63337 +++ b/include/net/netns/ipv4.h
63338 @@ -56,8 +56,8 @@ struct netns_ipv4 {
63339
63340 unsigned int sysctl_ping_group_range[2];
63341
63342 - atomic_t rt_genid;
63343 - atomic_t dev_addr_genid;
63344 + atomic_unchecked_t rt_genid;
63345 + atomic_unchecked_t dev_addr_genid;
63346
63347 #ifdef CONFIG_IP_MROUTE
63348 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63349 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63350 index 6a72a58..e6a127d 100644
63351 --- a/include/net/sctp/sctp.h
63352 +++ b/include/net/sctp/sctp.h
63353 @@ -318,9 +318,9 @@ do { \
63354
63355 #else /* SCTP_DEBUG */
63356
63357 -#define SCTP_DEBUG_PRINTK(whatever...)
63358 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63359 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63360 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63361 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63362 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63363 #define SCTP_ENABLE_DEBUG
63364 #define SCTP_DISABLE_DEBUG
63365 #define SCTP_ASSERT(expr, str, func)
63366 diff --git a/include/net/sock.h b/include/net/sock.h
63367 index 32e3937..87a1dbc 100644
63368 --- a/include/net/sock.h
63369 +++ b/include/net/sock.h
63370 @@ -277,7 +277,7 @@ struct sock {
63371 #ifdef CONFIG_RPS
63372 __u32 sk_rxhash;
63373 #endif
63374 - atomic_t sk_drops;
63375 + atomic_unchecked_t sk_drops;
63376 int sk_rcvbuf;
63377
63378 struct sk_filter __rcu *sk_filter;
63379 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
63380 }
63381
63382 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63383 - char __user *from, char *to,
63384 + char __user *from, unsigned char *to,
63385 int copy, int offset)
63386 {
63387 if (skb->ip_summed == CHECKSUM_NONE) {
63388 diff --git a/include/net/tcp.h b/include/net/tcp.h
63389 index bb18c4d..bb87972 100644
63390 --- a/include/net/tcp.h
63391 +++ b/include/net/tcp.h
63392 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
63393 char *name;
63394 sa_family_t family;
63395 const struct file_operations *seq_fops;
63396 - struct seq_operations seq_ops;
63397 + seq_operations_no_const seq_ops;
63398 };
63399
63400 struct tcp_iter_state {
63401 diff --git a/include/net/udp.h b/include/net/udp.h
63402 index 3b285f4..0219639 100644
63403 --- a/include/net/udp.h
63404 +++ b/include/net/udp.h
63405 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
63406 sa_family_t family;
63407 struct udp_table *udp_table;
63408 const struct file_operations *seq_fops;
63409 - struct seq_operations seq_ops;
63410 + seq_operations_no_const seq_ops;
63411 };
63412
63413 struct udp_iter_state {
63414 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63415 index b203e14..1df3991 100644
63416 --- a/include/net/xfrm.h
63417 +++ b/include/net/xfrm.h
63418 @@ -505,7 +505,7 @@ struct xfrm_policy {
63419 struct timer_list timer;
63420
63421 struct flow_cache_object flo;
63422 - atomic_t genid;
63423 + atomic_unchecked_t genid;
63424 u32 priority;
63425 u32 index;
63426 struct xfrm_mark mark;
63427 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63428 index 1a046b1..ee0bef0 100644
63429 --- a/include/rdma/iw_cm.h
63430 +++ b/include/rdma/iw_cm.h
63431 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
63432 int backlog);
63433
63434 int (*destroy_listen)(struct iw_cm_id *cm_id);
63435 -};
63436 +} __no_const;
63437
63438 /**
63439 * iw_create_cm_id - Create an IW CM identifier.
63440 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63441 index 5d1a758..1dbf795 100644
63442 --- a/include/scsi/libfc.h
63443 +++ b/include/scsi/libfc.h
63444 @@ -748,6 +748,7 @@ struct libfc_function_template {
63445 */
63446 void (*disc_stop_final) (struct fc_lport *);
63447 };
63448 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63449
63450 /**
63451 * struct fc_disc - Discovery context
63452 @@ -851,7 +852,7 @@ struct fc_lport {
63453 struct fc_vport *vport;
63454
63455 /* Operational Information */
63456 - struct libfc_function_template tt;
63457 + libfc_function_template_no_const tt;
63458 u8 link_up;
63459 u8 qfull;
63460 enum fc_lport_state state;
63461 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63462 index 5591ed5..13eb457 100644
63463 --- a/include/scsi/scsi_device.h
63464 +++ b/include/scsi/scsi_device.h
63465 @@ -161,9 +161,9 @@ struct scsi_device {
63466 unsigned int max_device_blocked; /* what device_blocked counts down from */
63467 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63468
63469 - atomic_t iorequest_cnt;
63470 - atomic_t iodone_cnt;
63471 - atomic_t ioerr_cnt;
63472 + atomic_unchecked_t iorequest_cnt;
63473 + atomic_unchecked_t iodone_cnt;
63474 + atomic_unchecked_t ioerr_cnt;
63475
63476 struct device sdev_gendev,
63477 sdev_dev;
63478 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63479 index 2a65167..91e01f8 100644
63480 --- a/include/scsi/scsi_transport_fc.h
63481 +++ b/include/scsi/scsi_transport_fc.h
63482 @@ -711,7 +711,7 @@ struct fc_function_template {
63483 unsigned long show_host_system_hostname:1;
63484
63485 unsigned long disable_target_scan:1;
63486 -};
63487 +} __do_const;
63488
63489
63490 /**
63491 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63492 index 030b87c..98a6954 100644
63493 --- a/include/sound/ak4xxx-adda.h
63494 +++ b/include/sound/ak4xxx-adda.h
63495 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63496 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63497 unsigned char val);
63498 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63499 -};
63500 +} __no_const;
63501
63502 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63503
63504 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63505 index 8c05e47..2b5df97 100644
63506 --- a/include/sound/hwdep.h
63507 +++ b/include/sound/hwdep.h
63508 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63509 struct snd_hwdep_dsp_status *status);
63510 int (*dsp_load)(struct snd_hwdep *hw,
63511 struct snd_hwdep_dsp_image *image);
63512 -};
63513 +} __no_const;
63514
63515 struct snd_hwdep {
63516 struct snd_card *card;
63517 diff --git a/include/sound/info.h b/include/sound/info.h
63518 index 5492cc4..1a65278 100644
63519 --- a/include/sound/info.h
63520 +++ b/include/sound/info.h
63521 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
63522 struct snd_info_buffer *buffer);
63523 void (*write)(struct snd_info_entry *entry,
63524 struct snd_info_buffer *buffer);
63525 -};
63526 +} __no_const;
63527
63528 struct snd_info_entry_ops {
63529 int (*open)(struct snd_info_entry *entry,
63530 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63531 index 0cf91b2..b70cae4 100644
63532 --- a/include/sound/pcm.h
63533 +++ b/include/sound/pcm.h
63534 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
63535 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63536 int (*ack)(struct snd_pcm_substream *substream);
63537 };
63538 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63539
63540 /*
63541 *
63542 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63543 index af1b49e..a5d55a5 100644
63544 --- a/include/sound/sb16_csp.h
63545 +++ b/include/sound/sb16_csp.h
63546 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63547 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63548 int (*csp_stop) (struct snd_sb_csp * p);
63549 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63550 -};
63551 +} __no_const;
63552
63553 /*
63554 * CSP private data
63555 diff --git a/include/sound/soc.h b/include/sound/soc.h
63556 index 11cfb59..e3f93f4 100644
63557 --- a/include/sound/soc.h
63558 +++ b/include/sound/soc.h
63559 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
63560 /* platform IO - used for platform DAPM */
63561 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63562 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63563 -};
63564 +} __do_const;
63565
63566 struct snd_soc_platform {
63567 const char *name;
63568 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63569 index 444cd6b..3327cc5 100644
63570 --- a/include/sound/ymfpci.h
63571 +++ b/include/sound/ymfpci.h
63572 @@ -358,7 +358,7 @@ struct snd_ymfpci {
63573 spinlock_t reg_lock;
63574 spinlock_t voice_lock;
63575 wait_queue_head_t interrupt_sleep;
63576 - atomic_t interrupt_sleep_count;
63577 + atomic_unchecked_t interrupt_sleep_count;
63578 struct snd_info_entry *proc_entry;
63579 const struct firmware *dsp_microcode;
63580 const struct firmware *controller_microcode;
63581 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63582 index 94bbec3..3a8c6b0 100644
63583 --- a/include/target/target_core_base.h
63584 +++ b/include/target/target_core_base.h
63585 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
63586 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63587 int (*t10_pr_register)(struct se_cmd *);
63588 int (*t10_pr_clear)(struct se_cmd *);
63589 -};
63590 +} __no_const;
63591
63592 struct t10_reservation {
63593 /* Reservation effects all target ports */
63594 @@ -465,8 +465,8 @@ struct se_cmd {
63595 atomic_t t_se_count;
63596 atomic_t t_task_cdbs_left;
63597 atomic_t t_task_cdbs_ex_left;
63598 - atomic_t t_task_cdbs_sent;
63599 - atomic_t t_transport_aborted;
63600 + atomic_unchecked_t t_task_cdbs_sent;
63601 + atomic_unchecked_t t_transport_aborted;
63602 atomic_t t_transport_active;
63603 atomic_t t_transport_complete;
63604 atomic_t t_transport_queue_active;
63605 @@ -705,7 +705,7 @@ struct se_device {
63606 /* Active commands on this virtual SE device */
63607 atomic_t simple_cmds;
63608 atomic_t depth_left;
63609 - atomic_t dev_ordered_id;
63610 + atomic_unchecked_t dev_ordered_id;
63611 atomic_t execute_tasks;
63612 atomic_t dev_ordered_sync;
63613 atomic_t dev_qf_count;
63614 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63615 index 1c09820..7f5ec79 100644
63616 --- a/include/trace/events/irq.h
63617 +++ b/include/trace/events/irq.h
63618 @@ -36,7 +36,7 @@ struct softirq_action;
63619 */
63620 TRACE_EVENT(irq_handler_entry,
63621
63622 - TP_PROTO(int irq, struct irqaction *action),
63623 + TP_PROTO(int irq, const struct irqaction *action),
63624
63625 TP_ARGS(irq, action),
63626
63627 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63628 */
63629 TRACE_EVENT(irq_handler_exit,
63630
63631 - TP_PROTO(int irq, struct irqaction *action, int ret),
63632 + TP_PROTO(int irq, const struct irqaction *action, int ret),
63633
63634 TP_ARGS(irq, action, ret),
63635
63636 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63637 index c41f308..6918de3 100644
63638 --- a/include/video/udlfb.h
63639 +++ b/include/video/udlfb.h
63640 @@ -52,10 +52,10 @@ struct dlfb_data {
63641 u32 pseudo_palette[256];
63642 int blank_mode; /*one of FB_BLANK_ */
63643 /* blit-only rendering path metrics, exposed through sysfs */
63644 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63645 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63646 - atomic_t bytes_sent; /* to usb, after compression including overhead */
63647 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63648 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63649 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63650 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63651 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63652 };
63653
63654 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63655 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63656 index 0993a22..32ba2fe 100644
63657 --- a/include/video/uvesafb.h
63658 +++ b/include/video/uvesafb.h
63659 @@ -177,6 +177,7 @@ struct uvesafb_par {
63660 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63661 u8 pmi_setpal; /* PMI for palette changes */
63662 u16 *pmi_base; /* protected mode interface location */
63663 + u8 *pmi_code; /* protected mode code location */
63664 void *pmi_start;
63665 void *pmi_pal;
63666 u8 *vbe_state_orig; /*
63667 diff --git a/init/Kconfig b/init/Kconfig
63668 index 43298f9..2f56c12 100644
63669 --- a/init/Kconfig
63670 +++ b/init/Kconfig
63671 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
63672
63673 config COMPAT_BRK
63674 bool "Disable heap randomization"
63675 - default y
63676 + default n
63677 help
63678 Randomizing heap placement makes heap exploits harder, but it
63679 also breaks ancient binaries (including anything libc5 based).
63680 diff --git a/init/do_mounts.c b/init/do_mounts.c
63681 index db6e5ee..7677ff7 100644
63682 --- a/init/do_mounts.c
63683 +++ b/init/do_mounts.c
63684 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
63685
63686 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63687 {
63688 - int err = sys_mount(name, "/root", fs, flags, data);
63689 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63690 if (err)
63691 return err;
63692
63693 - sys_chdir((const char __user __force *)"/root");
63694 + sys_chdir((const char __force_user*)"/root");
63695 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
63696 printk(KERN_INFO
63697 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
63698 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
63699 va_start(args, fmt);
63700 vsprintf(buf, fmt, args);
63701 va_end(args);
63702 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63703 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63704 if (fd >= 0) {
63705 sys_ioctl(fd, FDEJECT, 0);
63706 sys_close(fd);
63707 }
63708 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63709 - fd = sys_open("/dev/console", O_RDWR, 0);
63710 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63711 if (fd >= 0) {
63712 sys_ioctl(fd, TCGETS, (long)&termios);
63713 termios.c_lflag &= ~ICANON;
63714 sys_ioctl(fd, TCSETSF, (long)&termios);
63715 - sys_read(fd, &c, 1);
63716 + sys_read(fd, (char __user *)&c, 1);
63717 termios.c_lflag |= ICANON;
63718 sys_ioctl(fd, TCSETSF, (long)&termios);
63719 sys_close(fd);
63720 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
63721 mount_root();
63722 out:
63723 devtmpfs_mount("dev");
63724 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63725 - sys_chroot((const char __user __force *)".");
63726 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63727 + sys_chroot((const char __force_user *)".");
63728 }
63729 diff --git a/init/do_mounts.h b/init/do_mounts.h
63730 index f5b978a..69dbfe8 100644
63731 --- a/init/do_mounts.h
63732 +++ b/init/do_mounts.h
63733 @@ -15,15 +15,15 @@ extern int root_mountflags;
63734
63735 static inline int create_dev(char *name, dev_t dev)
63736 {
63737 - sys_unlink(name);
63738 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
63739 + sys_unlink((char __force_user *)name);
63740 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
63741 }
63742
63743 #if BITS_PER_LONG == 32
63744 static inline u32 bstat(char *name)
63745 {
63746 struct stat64 stat;
63747 - if (sys_stat64(name, &stat) != 0)
63748 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
63749 return 0;
63750 if (!S_ISBLK(stat.st_mode))
63751 return 0;
63752 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
63753 static inline u32 bstat(char *name)
63754 {
63755 struct stat stat;
63756 - if (sys_newstat(name, &stat) != 0)
63757 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
63758 return 0;
63759 if (!S_ISBLK(stat.st_mode))
63760 return 0;
63761 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
63762 index 3098a38..253064e 100644
63763 --- a/init/do_mounts_initrd.c
63764 +++ b/init/do_mounts_initrd.c
63765 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
63766 create_dev("/dev/root.old", Root_RAM0);
63767 /* mount initrd on rootfs' /root */
63768 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
63769 - sys_mkdir("/old", 0700);
63770 - root_fd = sys_open("/", 0, 0);
63771 - old_fd = sys_open("/old", 0, 0);
63772 + sys_mkdir((const char __force_user *)"/old", 0700);
63773 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
63774 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
63775 /* move initrd over / and chdir/chroot in initrd root */
63776 - sys_chdir("/root");
63777 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63778 - sys_chroot(".");
63779 + sys_chdir((const char __force_user *)"/root");
63780 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63781 + sys_chroot((const char __force_user *)".");
63782
63783 /*
63784 * In case that a resume from disk is carried out by linuxrc or one of
63785 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
63786
63787 /* move initrd to rootfs' /old */
63788 sys_fchdir(old_fd);
63789 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
63790 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
63791 /* switch root and cwd back to / of rootfs */
63792 sys_fchdir(root_fd);
63793 - sys_chroot(".");
63794 + sys_chroot((const char __force_user *)".");
63795 sys_close(old_fd);
63796 sys_close(root_fd);
63797
63798 if (new_decode_dev(real_root_dev) == Root_RAM0) {
63799 - sys_chdir("/old");
63800 + sys_chdir((const char __force_user *)"/old");
63801 return;
63802 }
63803
63804 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
63805 mount_root();
63806
63807 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
63808 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
63809 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
63810 if (!error)
63811 printk("okay\n");
63812 else {
63813 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
63814 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
63815 if (error == -ENOENT)
63816 printk("/initrd does not exist. Ignored.\n");
63817 else
63818 printk("failed\n");
63819 printk(KERN_NOTICE "Unmounting old root\n");
63820 - sys_umount("/old", MNT_DETACH);
63821 + sys_umount((char __force_user *)"/old", MNT_DETACH);
63822 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
63823 if (fd < 0) {
63824 error = fd;
63825 @@ -116,11 +116,11 @@ int __init initrd_load(void)
63826 * mounted in the normal path.
63827 */
63828 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
63829 - sys_unlink("/initrd.image");
63830 + sys_unlink((const char __force_user *)"/initrd.image");
63831 handle_initrd();
63832 return 1;
63833 }
63834 }
63835 - sys_unlink("/initrd.image");
63836 + sys_unlink((const char __force_user *)"/initrd.image");
63837 return 0;
63838 }
63839 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
63840 index 32c4799..c27ee74 100644
63841 --- a/init/do_mounts_md.c
63842 +++ b/init/do_mounts_md.c
63843 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
63844 partitioned ? "_d" : "", minor,
63845 md_setup_args[ent].device_names);
63846
63847 - fd = sys_open(name, 0, 0);
63848 + fd = sys_open((char __force_user *)name, 0, 0);
63849 if (fd < 0) {
63850 printk(KERN_ERR "md: open failed - cannot start "
63851 "array %s\n", name);
63852 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
63853 * array without it
63854 */
63855 sys_close(fd);
63856 - fd = sys_open(name, 0, 0);
63857 + fd = sys_open((char __force_user *)name, 0, 0);
63858 sys_ioctl(fd, BLKRRPART, 0);
63859 }
63860 sys_close(fd);
63861 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
63862
63863 wait_for_device_probe();
63864
63865 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
63866 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
63867 if (fd >= 0) {
63868 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
63869 sys_close(fd);
63870 diff --git a/init/initramfs.c b/init/initramfs.c
63871 index 2531811..040d4d4 100644
63872 --- a/init/initramfs.c
63873 +++ b/init/initramfs.c
63874 @@ -74,7 +74,7 @@ static void __init free_hash(void)
63875 }
63876 }
63877
63878 -static long __init do_utime(char __user *filename, time_t mtime)
63879 +static long __init do_utime(__force char __user *filename, time_t mtime)
63880 {
63881 struct timespec t[2];
63882
63883 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
63884 struct dir_entry *de, *tmp;
63885 list_for_each_entry_safe(de, tmp, &dir_list, list) {
63886 list_del(&de->list);
63887 - do_utime(de->name, de->mtime);
63888 + do_utime((char __force_user *)de->name, de->mtime);
63889 kfree(de->name);
63890 kfree(de);
63891 }
63892 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
63893 if (nlink >= 2) {
63894 char *old = find_link(major, minor, ino, mode, collected);
63895 if (old)
63896 - return (sys_link(old, collected) < 0) ? -1 : 1;
63897 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
63898 }
63899 return 0;
63900 }
63901 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
63902 {
63903 struct stat st;
63904
63905 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
63906 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
63907 if (S_ISDIR(st.st_mode))
63908 - sys_rmdir(path);
63909 + sys_rmdir((char __force_user *)path);
63910 else
63911 - sys_unlink(path);
63912 + sys_unlink((char __force_user *)path);
63913 }
63914 }
63915
63916 @@ -305,7 +305,7 @@ static int __init do_name(void)
63917 int openflags = O_WRONLY|O_CREAT;
63918 if (ml != 1)
63919 openflags |= O_TRUNC;
63920 - wfd = sys_open(collected, openflags, mode);
63921 + wfd = sys_open((char __force_user *)collected, openflags, mode);
63922
63923 if (wfd >= 0) {
63924 sys_fchown(wfd, uid, gid);
63925 @@ -317,17 +317,17 @@ static int __init do_name(void)
63926 }
63927 }
63928 } else if (S_ISDIR(mode)) {
63929 - sys_mkdir(collected, mode);
63930 - sys_chown(collected, uid, gid);
63931 - sys_chmod(collected, mode);
63932 + sys_mkdir((char __force_user *)collected, mode);
63933 + sys_chown((char __force_user *)collected, uid, gid);
63934 + sys_chmod((char __force_user *)collected, mode);
63935 dir_add(collected, mtime);
63936 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
63937 S_ISFIFO(mode) || S_ISSOCK(mode)) {
63938 if (maybe_link() == 0) {
63939 - sys_mknod(collected, mode, rdev);
63940 - sys_chown(collected, uid, gid);
63941 - sys_chmod(collected, mode);
63942 - do_utime(collected, mtime);
63943 + sys_mknod((char __force_user *)collected, mode, rdev);
63944 + sys_chown((char __force_user *)collected, uid, gid);
63945 + sys_chmod((char __force_user *)collected, mode);
63946 + do_utime((char __force_user *)collected, mtime);
63947 }
63948 }
63949 return 0;
63950 @@ -336,15 +336,15 @@ static int __init do_name(void)
63951 static int __init do_copy(void)
63952 {
63953 if (count >= body_len) {
63954 - sys_write(wfd, victim, body_len);
63955 + sys_write(wfd, (char __force_user *)victim, body_len);
63956 sys_close(wfd);
63957 - do_utime(vcollected, mtime);
63958 + do_utime((char __force_user *)vcollected, mtime);
63959 kfree(vcollected);
63960 eat(body_len);
63961 state = SkipIt;
63962 return 0;
63963 } else {
63964 - sys_write(wfd, victim, count);
63965 + sys_write(wfd, (char __force_user *)victim, count);
63966 body_len -= count;
63967 eat(count);
63968 return 1;
63969 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
63970 {
63971 collected[N_ALIGN(name_len) + body_len] = '\0';
63972 clean_path(collected, 0);
63973 - sys_symlink(collected + N_ALIGN(name_len), collected);
63974 - sys_lchown(collected, uid, gid);
63975 - do_utime(collected, mtime);
63976 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
63977 + sys_lchown((char __force_user *)collected, uid, gid);
63978 + do_utime((char __force_user *)collected, mtime);
63979 state = SkipIt;
63980 next_state = Reset;
63981 return 0;
63982 diff --git a/init/main.c b/init/main.c
63983 index 217ed23..ec5406f 100644
63984 --- a/init/main.c
63985 +++ b/init/main.c
63986 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
63987 extern void tc_init(void);
63988 #endif
63989
63990 +extern void grsecurity_init(void);
63991 +
63992 /*
63993 * Debug helper: via this flag we know that we are in 'early bootup code'
63994 * where only the boot processor is running with IRQ disabled. This means
63995 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
63996
63997 __setup("reset_devices", set_reset_devices);
63998
63999 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64000 +extern char pax_enter_kernel_user[];
64001 +extern char pax_exit_kernel_user[];
64002 +extern pgdval_t clone_pgd_mask;
64003 +#endif
64004 +
64005 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64006 +static int __init setup_pax_nouderef(char *str)
64007 +{
64008 +#ifdef CONFIG_X86_32
64009 + unsigned int cpu;
64010 + struct desc_struct *gdt;
64011 +
64012 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64013 + gdt = get_cpu_gdt_table(cpu);
64014 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64015 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64016 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64017 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64018 + }
64019 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64020 +#else
64021 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64022 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64023 + clone_pgd_mask = ~(pgdval_t)0UL;
64024 +#endif
64025 +
64026 + return 0;
64027 +}
64028 +early_param("pax_nouderef", setup_pax_nouderef);
64029 +#endif
64030 +
64031 +#ifdef CONFIG_PAX_SOFTMODE
64032 +int pax_softmode;
64033 +
64034 +static int __init setup_pax_softmode(char *str)
64035 +{
64036 + get_option(&str, &pax_softmode);
64037 + return 1;
64038 +}
64039 +__setup("pax_softmode=", setup_pax_softmode);
64040 +#endif
64041 +
64042 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64043 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64044 static const char *panic_later, *panic_param;
64045 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64046 {
64047 int count = preempt_count();
64048 int ret;
64049 + const char *msg1 = "", *msg2 = "";
64050
64051 if (initcall_debug)
64052 ret = do_one_initcall_debug(fn);
64053 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64054 sprintf(msgbuf, "error code %d ", ret);
64055
64056 if (preempt_count() != count) {
64057 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64058 + msg1 = " preemption imbalance";
64059 preempt_count() = count;
64060 }
64061 if (irqs_disabled()) {
64062 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64063 + msg2 = " disabled interrupts";
64064 local_irq_enable();
64065 }
64066 - if (msgbuf[0]) {
64067 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64068 + if (msgbuf[0] || *msg1 || *msg2) {
64069 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64070 }
64071
64072 return ret;
64073 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
64074 do_basic_setup();
64075
64076 /* Open the /dev/console on the rootfs, this should never fail */
64077 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64078 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64079 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64080
64081 (void) sys_dup(0);
64082 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
64083 if (!ramdisk_execute_command)
64084 ramdisk_execute_command = "/init";
64085
64086 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64087 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64088 ramdisk_execute_command = NULL;
64089 prepare_namespace();
64090 }
64091
64092 + grsecurity_init();
64093 +
64094 /*
64095 * Ok, we have completed the initial bootup, and
64096 * we're essentially up and running. Get rid of the
64097 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64098 index 5b4293d..f179875 100644
64099 --- a/ipc/mqueue.c
64100 +++ b/ipc/mqueue.c
64101 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64102 mq_bytes = (mq_msg_tblsz +
64103 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64104
64105 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64106 spin_lock(&mq_lock);
64107 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64108 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
64109 diff --git a/ipc/msg.c b/ipc/msg.c
64110 index 7385de2..a8180e08 100644
64111 --- a/ipc/msg.c
64112 +++ b/ipc/msg.c
64113 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64114 return security_msg_queue_associate(msq, msgflg);
64115 }
64116
64117 +static struct ipc_ops msg_ops = {
64118 + .getnew = newque,
64119 + .associate = msg_security,
64120 + .more_checks = NULL
64121 +};
64122 +
64123 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64124 {
64125 struct ipc_namespace *ns;
64126 - struct ipc_ops msg_ops;
64127 struct ipc_params msg_params;
64128
64129 ns = current->nsproxy->ipc_ns;
64130
64131 - msg_ops.getnew = newque;
64132 - msg_ops.associate = msg_security;
64133 - msg_ops.more_checks = NULL;
64134 -
64135 msg_params.key = key;
64136 msg_params.flg = msgflg;
64137
64138 diff --git a/ipc/sem.c b/ipc/sem.c
64139 index 5215a81..cfc0cac 100644
64140 --- a/ipc/sem.c
64141 +++ b/ipc/sem.c
64142 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64143 return 0;
64144 }
64145
64146 +static struct ipc_ops sem_ops = {
64147 + .getnew = newary,
64148 + .associate = sem_security,
64149 + .more_checks = sem_more_checks
64150 +};
64151 +
64152 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64153 {
64154 struct ipc_namespace *ns;
64155 - struct ipc_ops sem_ops;
64156 struct ipc_params sem_params;
64157
64158 ns = current->nsproxy->ipc_ns;
64159 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64160 if (nsems < 0 || nsems > ns->sc_semmsl)
64161 return -EINVAL;
64162
64163 - sem_ops.getnew = newary;
64164 - sem_ops.associate = sem_security;
64165 - sem_ops.more_checks = sem_more_checks;
64166 -
64167 sem_params.key = key;
64168 sem_params.flg = semflg;
64169 sem_params.u.nsems = nsems;
64170 diff --git a/ipc/shm.c b/ipc/shm.c
64171 index b76be5b..859e750 100644
64172 --- a/ipc/shm.c
64173 +++ b/ipc/shm.c
64174 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64175 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64176 #endif
64177
64178 +#ifdef CONFIG_GRKERNSEC
64179 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64180 + const time_t shm_createtime, const uid_t cuid,
64181 + const int shmid);
64182 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64183 + const time_t shm_createtime);
64184 +#endif
64185 +
64186 void shm_init_ns(struct ipc_namespace *ns)
64187 {
64188 ns->shm_ctlmax = SHMMAX;
64189 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64190 shp->shm_lprid = 0;
64191 shp->shm_atim = shp->shm_dtim = 0;
64192 shp->shm_ctim = get_seconds();
64193 +#ifdef CONFIG_GRKERNSEC
64194 + {
64195 + struct timespec timeval;
64196 + do_posix_clock_monotonic_gettime(&timeval);
64197 +
64198 + shp->shm_createtime = timeval.tv_sec;
64199 + }
64200 +#endif
64201 shp->shm_segsz = size;
64202 shp->shm_nattch = 0;
64203 shp->shm_file = file;
64204 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64205 return 0;
64206 }
64207
64208 +static struct ipc_ops shm_ops = {
64209 + .getnew = newseg,
64210 + .associate = shm_security,
64211 + .more_checks = shm_more_checks
64212 +};
64213 +
64214 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64215 {
64216 struct ipc_namespace *ns;
64217 - struct ipc_ops shm_ops;
64218 struct ipc_params shm_params;
64219
64220 ns = current->nsproxy->ipc_ns;
64221
64222 - shm_ops.getnew = newseg;
64223 - shm_ops.associate = shm_security;
64224 - shm_ops.more_checks = shm_more_checks;
64225 -
64226 shm_params.key = key;
64227 shm_params.flg = shmflg;
64228 shm_params.u.size = size;
64229 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64230 f_mode = FMODE_READ | FMODE_WRITE;
64231 }
64232 if (shmflg & SHM_EXEC) {
64233 +
64234 +#ifdef CONFIG_PAX_MPROTECT
64235 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
64236 + goto out;
64237 +#endif
64238 +
64239 prot |= PROT_EXEC;
64240 acc_mode |= S_IXUGO;
64241 }
64242 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64243 if (err)
64244 goto out_unlock;
64245
64246 +#ifdef CONFIG_GRKERNSEC
64247 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64248 + shp->shm_perm.cuid, shmid) ||
64249 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64250 + err = -EACCES;
64251 + goto out_unlock;
64252 + }
64253 +#endif
64254 +
64255 path = shp->shm_file->f_path;
64256 path_get(&path);
64257 shp->shm_nattch++;
64258 +#ifdef CONFIG_GRKERNSEC
64259 + shp->shm_lapid = current->pid;
64260 +#endif
64261 size = i_size_read(path.dentry->d_inode);
64262 shm_unlock(shp);
64263
64264 diff --git a/kernel/acct.c b/kernel/acct.c
64265 index fa7eb3d..7faf116 100644
64266 --- a/kernel/acct.c
64267 +++ b/kernel/acct.c
64268 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64269 */
64270 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64271 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64272 - file->f_op->write(file, (char *)&ac,
64273 + file->f_op->write(file, (char __force_user *)&ac,
64274 sizeof(acct_t), &file->f_pos);
64275 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64276 set_fs(fs);
64277 diff --git a/kernel/audit.c b/kernel/audit.c
64278 index 09fae26..ed71d5b 100644
64279 --- a/kernel/audit.c
64280 +++ b/kernel/audit.c
64281 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64282 3) suppressed due to audit_rate_limit
64283 4) suppressed due to audit_backlog_limit
64284 */
64285 -static atomic_t audit_lost = ATOMIC_INIT(0);
64286 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64287
64288 /* The netlink socket. */
64289 static struct sock *audit_sock;
64290 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64291 unsigned long now;
64292 int print;
64293
64294 - atomic_inc(&audit_lost);
64295 + atomic_inc_unchecked(&audit_lost);
64296
64297 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64298
64299 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64300 printk(KERN_WARNING
64301 "audit: audit_lost=%d audit_rate_limit=%d "
64302 "audit_backlog_limit=%d\n",
64303 - atomic_read(&audit_lost),
64304 + atomic_read_unchecked(&audit_lost),
64305 audit_rate_limit,
64306 audit_backlog_limit);
64307 audit_panic(message);
64308 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64309 status_set.pid = audit_pid;
64310 status_set.rate_limit = audit_rate_limit;
64311 status_set.backlog_limit = audit_backlog_limit;
64312 - status_set.lost = atomic_read(&audit_lost);
64313 + status_set.lost = atomic_read_unchecked(&audit_lost);
64314 status_set.backlog = skb_queue_len(&audit_skb_queue);
64315 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64316 &status_set, sizeof(status_set));
64317 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
64318 avail = audit_expand(ab,
64319 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
64320 if (!avail)
64321 - goto out;
64322 + goto out_va_end;
64323 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
64324 }
64325 - va_end(args2);
64326 if (len > 0)
64327 skb_put(skb, len);
64328 +out_va_end:
64329 + va_end(args2);
64330 out:
64331 return;
64332 }
64333 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64334 index 47b7fc1..c003c33 100644
64335 --- a/kernel/auditsc.c
64336 +++ b/kernel/auditsc.c
64337 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
64338 struct audit_buffer **ab,
64339 struct audit_aux_data_execve *axi)
64340 {
64341 - int i;
64342 - size_t len, len_sent = 0;
64343 + int i, len;
64344 + size_t len_sent = 0;
64345 const char __user *p;
64346 char *buf;
64347
64348 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64349 }
64350
64351 /* global counter which is incremented every time something logs in */
64352 -static atomic_t session_id = ATOMIC_INIT(0);
64353 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64354
64355 /**
64356 * audit_set_loginuid - set a task's audit_context loginuid
64357 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
64358 */
64359 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
64360 {
64361 - unsigned int sessionid = atomic_inc_return(&session_id);
64362 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
64363 struct audit_context *context = task->audit_context;
64364
64365 if (context && context->in_syscall) {
64366 diff --git a/kernel/capability.c b/kernel/capability.c
64367 index b463871..fa3ea1f 100644
64368 --- a/kernel/capability.c
64369 +++ b/kernel/capability.c
64370 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64371 * before modification is attempted and the application
64372 * fails.
64373 */
64374 + if (tocopy > ARRAY_SIZE(kdata))
64375 + return -EFAULT;
64376 +
64377 if (copy_to_user(dataptr, kdata, tocopy
64378 * sizeof(struct __user_cap_data_struct))) {
64379 return -EFAULT;
64380 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64381 BUG();
64382 }
64383
64384 - if (security_capable(ns, current_cred(), cap) == 0) {
64385 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
64386 current->flags |= PF_SUPERPRIV;
64387 return true;
64388 }
64389 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
64390 }
64391 EXPORT_SYMBOL(ns_capable);
64392
64393 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
64394 +{
64395 + if (unlikely(!cap_valid(cap))) {
64396 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64397 + BUG();
64398 + }
64399 +
64400 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
64401 + current->flags |= PF_SUPERPRIV;
64402 + return true;
64403 + }
64404 + return false;
64405 +}
64406 +EXPORT_SYMBOL(ns_capable_nolog);
64407 +
64408 +bool capable_nolog(int cap)
64409 +{
64410 + return ns_capable_nolog(&init_user_ns, cap);
64411 +}
64412 +EXPORT_SYMBOL(capable_nolog);
64413 +
64414 /**
64415 * task_ns_capable - Determine whether current task has a superior
64416 * capability targeted at a specific task's user namespace.
64417 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
64418 }
64419 EXPORT_SYMBOL(task_ns_capable);
64420
64421 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
64422 +{
64423 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
64424 +}
64425 +EXPORT_SYMBOL(task_ns_capable_nolog);
64426 +
64427 /**
64428 * nsown_capable - Check superior capability to one's own user_ns
64429 * @cap: The capability in question
64430 diff --git a/kernel/compat.c b/kernel/compat.c
64431 index f346ced..aa2b1f4 100644
64432 --- a/kernel/compat.c
64433 +++ b/kernel/compat.c
64434 @@ -13,6 +13,7 @@
64435
64436 #include <linux/linkage.h>
64437 #include <linux/compat.h>
64438 +#include <linux/module.h>
64439 #include <linux/errno.h>
64440 #include <linux/time.h>
64441 #include <linux/signal.h>
64442 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64443 mm_segment_t oldfs;
64444 long ret;
64445
64446 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64447 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64448 oldfs = get_fs();
64449 set_fs(KERNEL_DS);
64450 ret = hrtimer_nanosleep_restart(restart);
64451 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64452 oldfs = get_fs();
64453 set_fs(KERNEL_DS);
64454 ret = hrtimer_nanosleep(&tu,
64455 - rmtp ? (struct timespec __user *)&rmt : NULL,
64456 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
64457 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64458 set_fs(oldfs);
64459
64460 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64461 mm_segment_t old_fs = get_fs();
64462
64463 set_fs(KERNEL_DS);
64464 - ret = sys_sigpending((old_sigset_t __user *) &s);
64465 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
64466 set_fs(old_fs);
64467 if (ret == 0)
64468 ret = put_user(s, set);
64469 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
64470 old_fs = get_fs();
64471 set_fs(KERNEL_DS);
64472 ret = sys_sigprocmask(how,
64473 - set ? (old_sigset_t __user *) &s : NULL,
64474 - oset ? (old_sigset_t __user *) &s : NULL);
64475 + set ? (old_sigset_t __force_user *) &s : NULL,
64476 + oset ? (old_sigset_t __force_user *) &s : NULL);
64477 set_fs(old_fs);
64478 if (ret == 0)
64479 if (oset)
64480 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64481 mm_segment_t old_fs = get_fs();
64482
64483 set_fs(KERNEL_DS);
64484 - ret = sys_old_getrlimit(resource, &r);
64485 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64486 set_fs(old_fs);
64487
64488 if (!ret) {
64489 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64490 mm_segment_t old_fs = get_fs();
64491
64492 set_fs(KERNEL_DS);
64493 - ret = sys_getrusage(who, (struct rusage __user *) &r);
64494 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64495 set_fs(old_fs);
64496
64497 if (ret)
64498 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64499 set_fs (KERNEL_DS);
64500 ret = sys_wait4(pid,
64501 (stat_addr ?
64502 - (unsigned int __user *) &status : NULL),
64503 - options, (struct rusage __user *) &r);
64504 + (unsigned int __force_user *) &status : NULL),
64505 + options, (struct rusage __force_user *) &r);
64506 set_fs (old_fs);
64507
64508 if (ret > 0) {
64509 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64510 memset(&info, 0, sizeof(info));
64511
64512 set_fs(KERNEL_DS);
64513 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64514 - uru ? (struct rusage __user *)&ru : NULL);
64515 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64516 + uru ? (struct rusage __force_user *)&ru : NULL);
64517 set_fs(old_fs);
64518
64519 if ((ret < 0) || (info.si_signo == 0))
64520 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64521 oldfs = get_fs();
64522 set_fs(KERNEL_DS);
64523 err = sys_timer_settime(timer_id, flags,
64524 - (struct itimerspec __user *) &newts,
64525 - (struct itimerspec __user *) &oldts);
64526 + (struct itimerspec __force_user *) &newts,
64527 + (struct itimerspec __force_user *) &oldts);
64528 set_fs(oldfs);
64529 if (!err && old && put_compat_itimerspec(old, &oldts))
64530 return -EFAULT;
64531 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64532 oldfs = get_fs();
64533 set_fs(KERNEL_DS);
64534 err = sys_timer_gettime(timer_id,
64535 - (struct itimerspec __user *) &ts);
64536 + (struct itimerspec __force_user *) &ts);
64537 set_fs(oldfs);
64538 if (!err && put_compat_itimerspec(setting, &ts))
64539 return -EFAULT;
64540 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64541 oldfs = get_fs();
64542 set_fs(KERNEL_DS);
64543 err = sys_clock_settime(which_clock,
64544 - (struct timespec __user *) &ts);
64545 + (struct timespec __force_user *) &ts);
64546 set_fs(oldfs);
64547 return err;
64548 }
64549 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64550 oldfs = get_fs();
64551 set_fs(KERNEL_DS);
64552 err = sys_clock_gettime(which_clock,
64553 - (struct timespec __user *) &ts);
64554 + (struct timespec __force_user *) &ts);
64555 set_fs(oldfs);
64556 if (!err && put_compat_timespec(&ts, tp))
64557 return -EFAULT;
64558 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64559
64560 oldfs = get_fs();
64561 set_fs(KERNEL_DS);
64562 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64563 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64564 set_fs(oldfs);
64565
64566 err = compat_put_timex(utp, &txc);
64567 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64568 oldfs = get_fs();
64569 set_fs(KERNEL_DS);
64570 err = sys_clock_getres(which_clock,
64571 - (struct timespec __user *) &ts);
64572 + (struct timespec __force_user *) &ts);
64573 set_fs(oldfs);
64574 if (!err && tp && put_compat_timespec(&ts, tp))
64575 return -EFAULT;
64576 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64577 long err;
64578 mm_segment_t oldfs;
64579 struct timespec tu;
64580 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64581 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64582
64583 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64584 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64585 oldfs = get_fs();
64586 set_fs(KERNEL_DS);
64587 err = clock_nanosleep_restart(restart);
64588 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64589 oldfs = get_fs();
64590 set_fs(KERNEL_DS);
64591 err = sys_clock_nanosleep(which_clock, flags,
64592 - (struct timespec __user *) &in,
64593 - (struct timespec __user *) &out);
64594 + (struct timespec __force_user *) &in,
64595 + (struct timespec __force_user *) &out);
64596 set_fs(oldfs);
64597
64598 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64599 diff --git a/kernel/configs.c b/kernel/configs.c
64600 index 42e8fa0..9e7406b 100644
64601 --- a/kernel/configs.c
64602 +++ b/kernel/configs.c
64603 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64604 struct proc_dir_entry *entry;
64605
64606 /* create the current config file */
64607 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64608 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64609 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64610 + &ikconfig_file_ops);
64611 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64612 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64613 + &ikconfig_file_ops);
64614 +#endif
64615 +#else
64616 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64617 &ikconfig_file_ops);
64618 +#endif
64619 +
64620 if (!entry)
64621 return -ENOMEM;
64622
64623 diff --git a/kernel/cred.c b/kernel/cred.c
64624 index 5791612..a3c04dc 100644
64625 --- a/kernel/cred.c
64626 +++ b/kernel/cred.c
64627 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
64628 validate_creds(cred);
64629 put_cred(cred);
64630 }
64631 +
64632 +#ifdef CONFIG_GRKERNSEC_SETXID
64633 + cred = (struct cred *) tsk->delayed_cred;
64634 + if (cred) {
64635 + tsk->delayed_cred = NULL;
64636 + validate_creds(cred);
64637 + put_cred(cred);
64638 + }
64639 +#endif
64640 }
64641
64642 /**
64643 @@ -470,7 +479,7 @@ error_put:
64644 * Always returns 0 thus allowing this function to be tail-called at the end
64645 * of, say, sys_setgid().
64646 */
64647 -int commit_creds(struct cred *new)
64648 +static int __commit_creds(struct cred *new)
64649 {
64650 struct task_struct *task = current;
64651 const struct cred *old = task->real_cred;
64652 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
64653
64654 get_cred(new); /* we will require a ref for the subj creds too */
64655
64656 + gr_set_role_label(task, new->uid, new->gid);
64657 +
64658 /* dumpability changes */
64659 if (old->euid != new->euid ||
64660 old->egid != new->egid ||
64661 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
64662 put_cred(old);
64663 return 0;
64664 }
64665 +#ifdef CONFIG_GRKERNSEC_SETXID
64666 +extern int set_user(struct cred *new);
64667 +
64668 +void gr_delayed_cred_worker(void)
64669 +{
64670 + const struct cred *new = current->delayed_cred;
64671 + struct cred *ncred;
64672 +
64673 + current->delayed_cred = NULL;
64674 +
64675 + if (current_uid() && new != NULL) {
64676 + // from doing get_cred on it when queueing this
64677 + put_cred(new);
64678 + return;
64679 + } else if (new == NULL)
64680 + return;
64681 +
64682 + ncred = prepare_creds();
64683 + if (!ncred)
64684 + goto die;
64685 + // uids
64686 + ncred->uid = new->uid;
64687 + ncred->euid = new->euid;
64688 + ncred->suid = new->suid;
64689 + ncred->fsuid = new->fsuid;
64690 + // gids
64691 + ncred->gid = new->gid;
64692 + ncred->egid = new->egid;
64693 + ncred->sgid = new->sgid;
64694 + ncred->fsgid = new->fsgid;
64695 + // groups
64696 + if (set_groups(ncred, new->group_info) < 0) {
64697 + abort_creds(ncred);
64698 + goto die;
64699 + }
64700 + // caps
64701 + ncred->securebits = new->securebits;
64702 + ncred->cap_inheritable = new->cap_inheritable;
64703 + ncred->cap_permitted = new->cap_permitted;
64704 + ncred->cap_effective = new->cap_effective;
64705 + ncred->cap_bset = new->cap_bset;
64706 +
64707 + if (set_user(ncred)) {
64708 + abort_creds(ncred);
64709 + goto die;
64710 + }
64711 +
64712 + // from doing get_cred on it when queueing this
64713 + put_cred(new);
64714 +
64715 + __commit_creds(ncred);
64716 + return;
64717 +die:
64718 + // from doing get_cred on it when queueing this
64719 + put_cred(new);
64720 + do_group_exit(SIGKILL);
64721 +}
64722 +#endif
64723 +
64724 +int commit_creds(struct cred *new)
64725 +{
64726 +#ifdef CONFIG_GRKERNSEC_SETXID
64727 + struct task_struct *t;
64728 +
64729 + /* we won't get called with tasklist_lock held for writing
64730 + and interrupts disabled as the cred struct in that case is
64731 + init_cred
64732 + */
64733 + if (grsec_enable_setxid && !current_is_single_threaded() &&
64734 + !current_uid() && new->uid) {
64735 + rcu_read_lock();
64736 + read_lock(&tasklist_lock);
64737 + for (t = next_thread(current); t != current;
64738 + t = next_thread(t)) {
64739 + if (t->delayed_cred == NULL) {
64740 + t->delayed_cred = get_cred(new);
64741 + set_tsk_need_resched(t);
64742 + }
64743 + }
64744 + read_unlock(&tasklist_lock);
64745 + rcu_read_unlock();
64746 + }
64747 +#endif
64748 + return __commit_creds(new);
64749 +}
64750 +
64751 EXPORT_SYMBOL(commit_creds);
64752
64753 /**
64754 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
64755 index 0d7c087..01b8cef 100644
64756 --- a/kernel/debug/debug_core.c
64757 +++ b/kernel/debug/debug_core.c
64758 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
64759 */
64760 static atomic_t masters_in_kgdb;
64761 static atomic_t slaves_in_kgdb;
64762 -static atomic_t kgdb_break_tasklet_var;
64763 +static atomic_unchecked_t kgdb_break_tasklet_var;
64764 atomic_t kgdb_setting_breakpoint;
64765
64766 struct task_struct *kgdb_usethread;
64767 @@ -129,7 +129,7 @@ int kgdb_single_step;
64768 static pid_t kgdb_sstep_pid;
64769
64770 /* to keep track of the CPU which is doing the single stepping*/
64771 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64772 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64773
64774 /*
64775 * If you are debugging a problem where roundup (the collection of
64776 @@ -542,7 +542,7 @@ return_normal:
64777 * kernel will only try for the value of sstep_tries before
64778 * giving up and continuing on.
64779 */
64780 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
64781 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
64782 (kgdb_info[cpu].task &&
64783 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
64784 atomic_set(&kgdb_active, -1);
64785 @@ -636,8 +636,8 @@ cpu_master_loop:
64786 }
64787
64788 kgdb_restore:
64789 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
64790 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
64791 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
64792 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
64793 if (kgdb_info[sstep_cpu].task)
64794 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
64795 else
64796 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
64797 static void kgdb_tasklet_bpt(unsigned long ing)
64798 {
64799 kgdb_breakpoint();
64800 - atomic_set(&kgdb_break_tasklet_var, 0);
64801 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
64802 }
64803
64804 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
64805
64806 void kgdb_schedule_breakpoint(void)
64807 {
64808 - if (atomic_read(&kgdb_break_tasklet_var) ||
64809 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
64810 atomic_read(&kgdb_active) != -1 ||
64811 atomic_read(&kgdb_setting_breakpoint))
64812 return;
64813 - atomic_inc(&kgdb_break_tasklet_var);
64814 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
64815 tasklet_schedule(&kgdb_tasklet_breakpoint);
64816 }
64817 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
64818 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
64819 index 63786e7..0780cac 100644
64820 --- a/kernel/debug/kdb/kdb_main.c
64821 +++ b/kernel/debug/kdb/kdb_main.c
64822 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
64823 list_for_each_entry(mod, kdb_modules, list) {
64824
64825 kdb_printf("%-20s%8u 0x%p ", mod->name,
64826 - mod->core_size, (void *)mod);
64827 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
64828 #ifdef CONFIG_MODULE_UNLOAD
64829 kdb_printf("%4d ", module_refcount(mod));
64830 #endif
64831 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
64832 kdb_printf(" (Loading)");
64833 else
64834 kdb_printf(" (Live)");
64835 - kdb_printf(" 0x%p", mod->module_core);
64836 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64837
64838 #ifdef CONFIG_MODULE_UNLOAD
64839 {
64840 diff --git a/kernel/events/core.c b/kernel/events/core.c
64841 index 58690af..d903d75 100644
64842 --- a/kernel/events/core.c
64843 +++ b/kernel/events/core.c
64844 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
64845 return 0;
64846 }
64847
64848 -static atomic64_t perf_event_id;
64849 +static atomic64_unchecked_t perf_event_id;
64850
64851 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
64852 enum event_type_t event_type);
64853 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
64854
64855 static inline u64 perf_event_count(struct perf_event *event)
64856 {
64857 - return local64_read(&event->count) + atomic64_read(&event->child_count);
64858 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
64859 }
64860
64861 static u64 perf_event_read(struct perf_event *event)
64862 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
64863 mutex_lock(&event->child_mutex);
64864 total += perf_event_read(event);
64865 *enabled += event->total_time_enabled +
64866 - atomic64_read(&event->child_total_time_enabled);
64867 + atomic64_read_unchecked(&event->child_total_time_enabled);
64868 *running += event->total_time_running +
64869 - atomic64_read(&event->child_total_time_running);
64870 + atomic64_read_unchecked(&event->child_total_time_running);
64871
64872 list_for_each_entry(child, &event->child_list, child_list) {
64873 total += perf_event_read(child);
64874 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
64875 userpg->offset -= local64_read(&event->hw.prev_count);
64876
64877 userpg->time_enabled = enabled +
64878 - atomic64_read(&event->child_total_time_enabled);
64879 + atomic64_read_unchecked(&event->child_total_time_enabled);
64880
64881 userpg->time_running = running +
64882 - atomic64_read(&event->child_total_time_running);
64883 + atomic64_read_unchecked(&event->child_total_time_running);
64884
64885 barrier();
64886 ++userpg->lock;
64887 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
64888 values[n++] = perf_event_count(event);
64889 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64890 values[n++] = enabled +
64891 - atomic64_read(&event->child_total_time_enabled);
64892 + atomic64_read_unchecked(&event->child_total_time_enabled);
64893 }
64894 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64895 values[n++] = running +
64896 - atomic64_read(&event->child_total_time_running);
64897 + atomic64_read_unchecked(&event->child_total_time_running);
64898 }
64899 if (read_format & PERF_FORMAT_ID)
64900 values[n++] = primary_event_id(event);
64901 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
64902 * need to add enough zero bytes after the string to handle
64903 * the 64bit alignment we do later.
64904 */
64905 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
64906 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
64907 if (!buf) {
64908 name = strncpy(tmp, "//enomem", sizeof(tmp));
64909 goto got_name;
64910 }
64911 - name = d_path(&file->f_path, buf, PATH_MAX);
64912 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
64913 if (IS_ERR(name)) {
64914 name = strncpy(tmp, "//toolong", sizeof(tmp));
64915 goto got_name;
64916 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
64917 event->parent = parent_event;
64918
64919 event->ns = get_pid_ns(current->nsproxy->pid_ns);
64920 - event->id = atomic64_inc_return(&perf_event_id);
64921 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
64922
64923 event->state = PERF_EVENT_STATE_INACTIVE;
64924
64925 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
64926 /*
64927 * Add back the child's count to the parent's count:
64928 */
64929 - atomic64_add(child_val, &parent_event->child_count);
64930 - atomic64_add(child_event->total_time_enabled,
64931 + atomic64_add_unchecked(child_val, &parent_event->child_count);
64932 + atomic64_add_unchecked(child_event->total_time_enabled,
64933 &parent_event->child_total_time_enabled);
64934 - atomic64_add(child_event->total_time_running,
64935 + atomic64_add_unchecked(child_event->total_time_running,
64936 &parent_event->child_total_time_running);
64937
64938 /*
64939 diff --git a/kernel/exit.c b/kernel/exit.c
64940 index e6e01b9..0a21b0a 100644
64941 --- a/kernel/exit.c
64942 +++ b/kernel/exit.c
64943 @@ -57,6 +57,10 @@
64944 #include <asm/pgtable.h>
64945 #include <asm/mmu_context.h>
64946
64947 +#ifdef CONFIG_GRKERNSEC
64948 +extern rwlock_t grsec_exec_file_lock;
64949 +#endif
64950 +
64951 static void exit_mm(struct task_struct * tsk);
64952
64953 static void __unhash_process(struct task_struct *p, bool group_dead)
64954 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
64955 struct task_struct *leader;
64956 int zap_leader;
64957 repeat:
64958 +#ifdef CONFIG_NET
64959 + gr_del_task_from_ip_table(p);
64960 +#endif
64961 +
64962 /* don't need to get the RCU readlock here - the process is dead and
64963 * can't be modifying its own credentials. But shut RCU-lockdep up */
64964 rcu_read_lock();
64965 @@ -380,7 +388,7 @@ int allow_signal(int sig)
64966 * know it'll be handled, so that they don't get converted to
64967 * SIGKILL or just silently dropped.
64968 */
64969 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
64970 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
64971 recalc_sigpending();
64972 spin_unlock_irq(&current->sighand->siglock);
64973 return 0;
64974 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
64975 vsnprintf(current->comm, sizeof(current->comm), name, args);
64976 va_end(args);
64977
64978 +#ifdef CONFIG_GRKERNSEC
64979 + write_lock(&grsec_exec_file_lock);
64980 + if (current->exec_file) {
64981 + fput(current->exec_file);
64982 + current->exec_file = NULL;
64983 + }
64984 + write_unlock(&grsec_exec_file_lock);
64985 +#endif
64986 +
64987 + gr_set_kernel_label(current);
64988 +
64989 /*
64990 * If we were started as result of loading a module, close all of the
64991 * user space pages. We don't need them, and if we didn't close them
64992 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
64993 struct task_struct *tsk = current;
64994 int group_dead;
64995
64996 + set_fs(USER_DS);
64997 +
64998 profile_task_exit(tsk);
64999
65000 WARN_ON(blk_needs_flush_plug(tsk));
65001 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
65002 * mm_release()->clear_child_tid() from writing to a user-controlled
65003 * kernel address.
65004 */
65005 - set_fs(USER_DS);
65006
65007 ptrace_event(PTRACE_EVENT_EXIT, code);
65008
65009 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
65010 tsk->exit_code = code;
65011 taskstats_exit(tsk, group_dead);
65012
65013 + gr_acl_handle_psacct(tsk, code);
65014 + gr_acl_handle_exit();
65015 +
65016 exit_mm(tsk);
65017
65018 if (group_dead)
65019 @@ -1068,7 +1091,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65020 * Take down every thread in the group. This is called by fatal signals
65021 * as well as by sys_exit_group (below).
65022 */
65023 -NORET_TYPE void
65024 +__noreturn void
65025 do_group_exit(int exit_code)
65026 {
65027 struct signal_struct *sig = current->signal;
65028 diff --git a/kernel/fork.c b/kernel/fork.c
65029 index 0acf42c0..9e40e2e 100644
65030 --- a/kernel/fork.c
65031 +++ b/kernel/fork.c
65032 @@ -281,7 +281,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65033 *stackend = STACK_END_MAGIC; /* for overflow detection */
65034
65035 #ifdef CONFIG_CC_STACKPROTECTOR
65036 - tsk->stack_canary = get_random_int();
65037 + tsk->stack_canary = pax_get_random_long();
65038 #endif
65039
65040 /*
65041 @@ -305,13 +305,77 @@ out:
65042 }
65043
65044 #ifdef CONFIG_MMU
65045 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
65046 +{
65047 + struct vm_area_struct *tmp;
65048 + unsigned long charge;
65049 + struct mempolicy *pol;
65050 + struct file *file;
65051 +
65052 + charge = 0;
65053 + if (mpnt->vm_flags & VM_ACCOUNT) {
65054 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65055 + if (security_vm_enough_memory(len))
65056 + goto fail_nomem;
65057 + charge = len;
65058 + }
65059 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65060 + if (!tmp)
65061 + goto fail_nomem;
65062 + *tmp = *mpnt;
65063 + tmp->vm_mm = mm;
65064 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65065 + pol = mpol_dup(vma_policy(mpnt));
65066 + if (IS_ERR(pol))
65067 + goto fail_nomem_policy;
65068 + vma_set_policy(tmp, pol);
65069 + if (anon_vma_fork(tmp, mpnt))
65070 + goto fail_nomem_anon_vma_fork;
65071 + tmp->vm_flags &= ~VM_LOCKED;
65072 + tmp->vm_next = tmp->vm_prev = NULL;
65073 + tmp->vm_mirror = NULL;
65074 + file = tmp->vm_file;
65075 + if (file) {
65076 + struct inode *inode = file->f_path.dentry->d_inode;
65077 + struct address_space *mapping = file->f_mapping;
65078 +
65079 + get_file(file);
65080 + if (tmp->vm_flags & VM_DENYWRITE)
65081 + atomic_dec(&inode->i_writecount);
65082 + mutex_lock(&mapping->i_mmap_mutex);
65083 + if (tmp->vm_flags & VM_SHARED)
65084 + mapping->i_mmap_writable++;
65085 + flush_dcache_mmap_lock(mapping);
65086 + /* insert tmp into the share list, just after mpnt */
65087 + vma_prio_tree_add(tmp, mpnt);
65088 + flush_dcache_mmap_unlock(mapping);
65089 + mutex_unlock(&mapping->i_mmap_mutex);
65090 + }
65091 +
65092 + /*
65093 + * Clear hugetlb-related page reserves for children. This only
65094 + * affects MAP_PRIVATE mappings. Faults generated by the child
65095 + * are not guaranteed to succeed, even if read-only
65096 + */
65097 + if (is_vm_hugetlb_page(tmp))
65098 + reset_vma_resv_huge_pages(tmp);
65099 +
65100 + return tmp;
65101 +
65102 +fail_nomem_anon_vma_fork:
65103 + mpol_put(pol);
65104 +fail_nomem_policy:
65105 + kmem_cache_free(vm_area_cachep, tmp);
65106 +fail_nomem:
65107 + vm_unacct_memory(charge);
65108 + return NULL;
65109 +}
65110 +
65111 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65112 {
65113 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65114 struct rb_node **rb_link, *rb_parent;
65115 int retval;
65116 - unsigned long charge;
65117 - struct mempolicy *pol;
65118
65119 down_write(&oldmm->mmap_sem);
65120 flush_cache_dup_mm(oldmm);
65121 @@ -323,8 +387,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65122 mm->locked_vm = 0;
65123 mm->mmap = NULL;
65124 mm->mmap_cache = NULL;
65125 - mm->free_area_cache = oldmm->mmap_base;
65126 - mm->cached_hole_size = ~0UL;
65127 + mm->free_area_cache = oldmm->free_area_cache;
65128 + mm->cached_hole_size = oldmm->cached_hole_size;
65129 mm->map_count = 0;
65130 cpumask_clear(mm_cpumask(mm));
65131 mm->mm_rb = RB_ROOT;
65132 @@ -340,8 +404,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65133
65134 prev = NULL;
65135 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65136 - struct file *file;
65137 -
65138 if (mpnt->vm_flags & VM_DONTCOPY) {
65139 long pages = vma_pages(mpnt);
65140 mm->total_vm -= pages;
65141 @@ -349,53 +411,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65142 -pages);
65143 continue;
65144 }
65145 - charge = 0;
65146 - if (mpnt->vm_flags & VM_ACCOUNT) {
65147 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65148 - if (security_vm_enough_memory(len))
65149 - goto fail_nomem;
65150 - charge = len;
65151 + tmp = dup_vma(mm, mpnt);
65152 + if (!tmp) {
65153 + retval = -ENOMEM;
65154 + goto out;
65155 }
65156 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65157 - if (!tmp)
65158 - goto fail_nomem;
65159 - *tmp = *mpnt;
65160 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65161 - pol = mpol_dup(vma_policy(mpnt));
65162 - retval = PTR_ERR(pol);
65163 - if (IS_ERR(pol))
65164 - goto fail_nomem_policy;
65165 - vma_set_policy(tmp, pol);
65166 - tmp->vm_mm = mm;
65167 - if (anon_vma_fork(tmp, mpnt))
65168 - goto fail_nomem_anon_vma_fork;
65169 - tmp->vm_flags &= ~VM_LOCKED;
65170 - tmp->vm_next = tmp->vm_prev = NULL;
65171 - file = tmp->vm_file;
65172 - if (file) {
65173 - struct inode *inode = file->f_path.dentry->d_inode;
65174 - struct address_space *mapping = file->f_mapping;
65175 -
65176 - get_file(file);
65177 - if (tmp->vm_flags & VM_DENYWRITE)
65178 - atomic_dec(&inode->i_writecount);
65179 - mutex_lock(&mapping->i_mmap_mutex);
65180 - if (tmp->vm_flags & VM_SHARED)
65181 - mapping->i_mmap_writable++;
65182 - flush_dcache_mmap_lock(mapping);
65183 - /* insert tmp into the share list, just after mpnt */
65184 - vma_prio_tree_add(tmp, mpnt);
65185 - flush_dcache_mmap_unlock(mapping);
65186 - mutex_unlock(&mapping->i_mmap_mutex);
65187 - }
65188 -
65189 - /*
65190 - * Clear hugetlb-related page reserves for children. This only
65191 - * affects MAP_PRIVATE mappings. Faults generated by the child
65192 - * are not guaranteed to succeed, even if read-only
65193 - */
65194 - if (is_vm_hugetlb_page(tmp))
65195 - reset_vma_resv_huge_pages(tmp);
65196
65197 /*
65198 * Link in the new vma and copy the page table entries.
65199 @@ -418,6 +438,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65200 if (retval)
65201 goto out;
65202 }
65203 +
65204 +#ifdef CONFIG_PAX_SEGMEXEC
65205 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65206 + struct vm_area_struct *mpnt_m;
65207 +
65208 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65209 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65210 +
65211 + if (!mpnt->vm_mirror)
65212 + continue;
65213 +
65214 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65215 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65216 + mpnt->vm_mirror = mpnt_m;
65217 + } else {
65218 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65219 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65220 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65221 + mpnt->vm_mirror->vm_mirror = mpnt;
65222 + }
65223 + }
65224 + BUG_ON(mpnt_m);
65225 + }
65226 +#endif
65227 +
65228 /* a new mm has just been created */
65229 arch_dup_mmap(oldmm, mm);
65230 retval = 0;
65231 @@ -426,14 +471,6 @@ out:
65232 flush_tlb_mm(oldmm);
65233 up_write(&oldmm->mmap_sem);
65234 return retval;
65235 -fail_nomem_anon_vma_fork:
65236 - mpol_put(pol);
65237 -fail_nomem_policy:
65238 - kmem_cache_free(vm_area_cachep, tmp);
65239 -fail_nomem:
65240 - retval = -ENOMEM;
65241 - vm_unacct_memory(charge);
65242 - goto out;
65243 }
65244
65245 static inline int mm_alloc_pgd(struct mm_struct *mm)
65246 @@ -645,6 +682,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
65247 }
65248 EXPORT_SYMBOL_GPL(get_task_mm);
65249
65250 +struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65251 +{
65252 + struct mm_struct *mm;
65253 + int err;
65254 +
65255 + err = mutex_lock_killable(&task->signal->cred_guard_mutex);
65256 + if (err)
65257 + return ERR_PTR(err);
65258 +
65259 + mm = get_task_mm(task);
65260 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65261 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65262 + mmput(mm);
65263 + mm = ERR_PTR(-EACCES);
65264 + }
65265 + mutex_unlock(&task->signal->cred_guard_mutex);
65266 +
65267 + return mm;
65268 +}
65269 +
65270 /* Please note the differences between mmput and mm_release.
65271 * mmput is called whenever we stop holding onto a mm_struct,
65272 * error success whatever.
65273 @@ -830,13 +887,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65274 spin_unlock(&fs->lock);
65275 return -EAGAIN;
65276 }
65277 - fs->users++;
65278 + atomic_inc(&fs->users);
65279 spin_unlock(&fs->lock);
65280 return 0;
65281 }
65282 tsk->fs = copy_fs_struct(fs);
65283 if (!tsk->fs)
65284 return -ENOMEM;
65285 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65286 return 0;
65287 }
65288
65289 @@ -1100,6 +1158,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65290 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65291 #endif
65292 retval = -EAGAIN;
65293 +
65294 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65295 +
65296 if (atomic_read(&p->real_cred->user->processes) >=
65297 task_rlimit(p, RLIMIT_NPROC)) {
65298 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65299 @@ -1259,6 +1320,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65300 if (clone_flags & CLONE_THREAD)
65301 p->tgid = current->tgid;
65302
65303 + gr_copy_label(p);
65304 +
65305 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65306 /*
65307 * Clear TID on mm_release()?
65308 @@ -1421,6 +1484,8 @@ bad_fork_cleanup_count:
65309 bad_fork_free:
65310 free_task(p);
65311 fork_out:
65312 + gr_log_forkfail(retval);
65313 +
65314 return ERR_PTR(retval);
65315 }
65316
65317 @@ -1521,6 +1586,8 @@ long do_fork(unsigned long clone_flags,
65318 if (clone_flags & CLONE_PARENT_SETTID)
65319 put_user(nr, parent_tidptr);
65320
65321 + gr_handle_brute_check();
65322 +
65323 if (clone_flags & CLONE_VFORK) {
65324 p->vfork_done = &vfork;
65325 init_completion(&vfork);
65326 @@ -1630,7 +1697,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65327 return 0;
65328
65329 /* don't need lock here; in the worst case we'll do useless copy */
65330 - if (fs->users == 1)
65331 + if (atomic_read(&fs->users) == 1)
65332 return 0;
65333
65334 *new_fsp = copy_fs_struct(fs);
65335 @@ -1719,7 +1786,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65336 fs = current->fs;
65337 spin_lock(&fs->lock);
65338 current->fs = new_fs;
65339 - if (--fs->users)
65340 + gr_set_chroot_entries(current, &current->fs->root);
65341 + if (atomic_dec_return(&fs->users))
65342 new_fs = NULL;
65343 else
65344 new_fs = fs;
65345 diff --git a/kernel/futex.c b/kernel/futex.c
65346 index 1614be2..37abc7e 100644
65347 --- a/kernel/futex.c
65348 +++ b/kernel/futex.c
65349 @@ -54,6 +54,7 @@
65350 #include <linux/mount.h>
65351 #include <linux/pagemap.h>
65352 #include <linux/syscalls.h>
65353 +#include <linux/ptrace.h>
65354 #include <linux/signal.h>
65355 #include <linux/export.h>
65356 #include <linux/magic.h>
65357 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65358 struct page *page, *page_head;
65359 int err, ro = 0;
65360
65361 +#ifdef CONFIG_PAX_SEGMEXEC
65362 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65363 + return -EFAULT;
65364 +#endif
65365 +
65366 /*
65367 * The futex address must be "naturally" aligned.
65368 */
65369 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65370 if (!p)
65371 goto err_unlock;
65372 ret = -EPERM;
65373 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65374 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65375 + goto err_unlock;
65376 +#endif
65377 pcred = __task_cred(p);
65378 /* If victim is in different user_ns, then uids are not
65379 comparable, so we must have CAP_SYS_PTRACE */
65380 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
65381 {
65382 u32 curval;
65383 int i;
65384 + mm_segment_t oldfs;
65385
65386 /*
65387 * This will fail and we want it. Some arch implementations do
65388 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
65389 * implementation, the non-functional ones will return
65390 * -ENOSYS.
65391 */
65392 + oldfs = get_fs();
65393 + set_fs(USER_DS);
65394 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65395 futex_cmpxchg_enabled = 1;
65396 + set_fs(oldfs);
65397
65398 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65399 plist_head_init(&futex_queues[i].chain);
65400 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
65401 index 5f9e689..582d46d 100644
65402 --- a/kernel/futex_compat.c
65403 +++ b/kernel/futex_compat.c
65404 @@ -10,6 +10,7 @@
65405 #include <linux/compat.h>
65406 #include <linux/nsproxy.h>
65407 #include <linux/futex.h>
65408 +#include <linux/ptrace.h>
65409
65410 #include <asm/uaccess.h>
65411
65412 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65413 {
65414 struct compat_robust_list_head __user *head;
65415 unsigned long ret;
65416 - const struct cred *cred = current_cred(), *pcred;
65417 + const struct cred *cred = current_cred();
65418 + const struct cred *pcred;
65419
65420 if (!futex_cmpxchg_enabled)
65421 return -ENOSYS;
65422 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65423 if (!p)
65424 goto err_unlock;
65425 ret = -EPERM;
65426 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65427 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65428 + goto err_unlock;
65429 +#endif
65430 pcred = __task_cred(p);
65431 /* If victim is in different user_ns, then uids are not
65432 comparable, so we must have CAP_SYS_PTRACE */
65433 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65434 index 9b22d03..6295b62 100644
65435 --- a/kernel/gcov/base.c
65436 +++ b/kernel/gcov/base.c
65437 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
65438 }
65439
65440 #ifdef CONFIG_MODULES
65441 -static inline int within(void *addr, void *start, unsigned long size)
65442 -{
65443 - return ((addr >= start) && (addr < start + size));
65444 -}
65445 -
65446 /* Update list and generate events when modules are unloaded. */
65447 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65448 void *data)
65449 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65450 prev = NULL;
65451 /* Remove entries located in module from linked list. */
65452 for (info = gcov_info_head; info; info = info->next) {
65453 - if (within(info, mod->module_core, mod->core_size)) {
65454 + if (within_module_core_rw((unsigned long)info, mod)) {
65455 if (prev)
65456 prev->next = info->next;
65457 else
65458 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65459 index ae34bf5..4e2f3d0 100644
65460 --- a/kernel/hrtimer.c
65461 +++ b/kernel/hrtimer.c
65462 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65463 local_irq_restore(flags);
65464 }
65465
65466 -static void run_hrtimer_softirq(struct softirq_action *h)
65467 +static void run_hrtimer_softirq(void)
65468 {
65469 hrtimer_peek_ahead_timers();
65470 }
65471 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65472 index 66ff710..05a5128 100644
65473 --- a/kernel/jump_label.c
65474 +++ b/kernel/jump_label.c
65475 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65476
65477 size = (((unsigned long)stop - (unsigned long)start)
65478 / sizeof(struct jump_entry));
65479 + pax_open_kernel();
65480 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65481 + pax_close_kernel();
65482 }
65483
65484 static void jump_label_update(struct jump_label_key *key, int enable);
65485 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65486 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65487 struct jump_entry *iter;
65488
65489 + pax_open_kernel();
65490 for (iter = iter_start; iter < iter_stop; iter++) {
65491 if (within_module_init(iter->code, mod))
65492 iter->code = 0;
65493 }
65494 + pax_close_kernel();
65495 }
65496
65497 static int
65498 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65499 index 079f1d3..a407562 100644
65500 --- a/kernel/kallsyms.c
65501 +++ b/kernel/kallsyms.c
65502 @@ -11,6 +11,9 @@
65503 * Changed the compression method from stem compression to "table lookup"
65504 * compression (see scripts/kallsyms.c for a more complete description)
65505 */
65506 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65507 +#define __INCLUDED_BY_HIDESYM 1
65508 +#endif
65509 #include <linux/kallsyms.h>
65510 #include <linux/module.h>
65511 #include <linux/init.h>
65512 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65513
65514 static inline int is_kernel_inittext(unsigned long addr)
65515 {
65516 + if (system_state != SYSTEM_BOOTING)
65517 + return 0;
65518 +
65519 if (addr >= (unsigned long)_sinittext
65520 && addr <= (unsigned long)_einittext)
65521 return 1;
65522 return 0;
65523 }
65524
65525 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65526 +#ifdef CONFIG_MODULES
65527 +static inline int is_module_text(unsigned long addr)
65528 +{
65529 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65530 + return 1;
65531 +
65532 + addr = ktla_ktva(addr);
65533 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65534 +}
65535 +#else
65536 +static inline int is_module_text(unsigned long addr)
65537 +{
65538 + return 0;
65539 +}
65540 +#endif
65541 +#endif
65542 +
65543 static inline int is_kernel_text(unsigned long addr)
65544 {
65545 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65546 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65547
65548 static inline int is_kernel(unsigned long addr)
65549 {
65550 +
65551 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65552 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
65553 + return 1;
65554 +
65555 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65556 +#else
65557 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65558 +#endif
65559 +
65560 return 1;
65561 return in_gate_area_no_mm(addr);
65562 }
65563
65564 static int is_ksym_addr(unsigned long addr)
65565 {
65566 +
65567 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65568 + if (is_module_text(addr))
65569 + return 0;
65570 +#endif
65571 +
65572 if (all_var)
65573 return is_kernel(addr);
65574
65575 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65576
65577 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65578 {
65579 - iter->name[0] = '\0';
65580 iter->nameoff = get_symbol_offset(new_pos);
65581 iter->pos = new_pos;
65582 }
65583 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65584 {
65585 struct kallsym_iter *iter = m->private;
65586
65587 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65588 + if (current_uid())
65589 + return 0;
65590 +#endif
65591 +
65592 /* Some debugging symbols have no name. Ignore them. */
65593 if (!iter->name[0])
65594 return 0;
65595 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65596 struct kallsym_iter *iter;
65597 int ret;
65598
65599 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65600 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65601 if (!iter)
65602 return -ENOMEM;
65603 reset_iter(iter, 0);
65604 diff --git a/kernel/kexec.c b/kernel/kexec.c
65605 index dc7bc08..4601964 100644
65606 --- a/kernel/kexec.c
65607 +++ b/kernel/kexec.c
65608 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65609 unsigned long flags)
65610 {
65611 struct compat_kexec_segment in;
65612 - struct kexec_segment out, __user *ksegments;
65613 + struct kexec_segment out;
65614 + struct kexec_segment __user *ksegments;
65615 unsigned long i, result;
65616
65617 /* Don't allow clients that don't understand the native
65618 diff --git a/kernel/kmod.c b/kernel/kmod.c
65619 index a4bea97..7a1ae9a 100644
65620 --- a/kernel/kmod.c
65621 +++ b/kernel/kmod.c
65622 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
65623 * If module auto-loading support is disabled then this function
65624 * becomes a no-operation.
65625 */
65626 -int __request_module(bool wait, const char *fmt, ...)
65627 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65628 {
65629 - va_list args;
65630 char module_name[MODULE_NAME_LEN];
65631 unsigned int max_modprobes;
65632 int ret;
65633 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
65634 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
65635 static char *envp[] = { "HOME=/",
65636 "TERM=linux",
65637 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
65638 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
65639 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65640 static int kmod_loop_msg;
65641
65642 - va_start(args, fmt);
65643 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65644 - va_end(args);
65645 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65646 if (ret >= MODULE_NAME_LEN)
65647 return -ENAMETOOLONG;
65648
65649 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
65650 if (ret)
65651 return ret;
65652
65653 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65654 + if (!current_uid()) {
65655 + /* hack to workaround consolekit/udisks stupidity */
65656 + read_lock(&tasklist_lock);
65657 + if (!strcmp(current->comm, "mount") &&
65658 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65659 + read_unlock(&tasklist_lock);
65660 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65661 + return -EPERM;
65662 + }
65663 + read_unlock(&tasklist_lock);
65664 + }
65665 +#endif
65666 +
65667 /* If modprobe needs a service that is in a module, we get a recursive
65668 * loop. Limit the number of running kmod threads to max_threads/2 or
65669 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65670 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
65671 atomic_dec(&kmod_concurrent);
65672 return ret;
65673 }
65674 +
65675 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65676 +{
65677 + va_list args;
65678 + int ret;
65679 +
65680 + va_start(args, fmt);
65681 + ret = ____request_module(wait, module_param, fmt, args);
65682 + va_end(args);
65683 +
65684 + return ret;
65685 +}
65686 +
65687 +int __request_module(bool wait, const char *fmt, ...)
65688 +{
65689 + va_list args;
65690 + int ret;
65691 +
65692 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65693 + if (current_uid()) {
65694 + char module_param[MODULE_NAME_LEN];
65695 +
65696 + memset(module_param, 0, sizeof(module_param));
65697 +
65698 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65699 +
65700 + va_start(args, fmt);
65701 + ret = ____request_module(wait, module_param, fmt, args);
65702 + va_end(args);
65703 +
65704 + return ret;
65705 + }
65706 +#endif
65707 +
65708 + va_start(args, fmt);
65709 + ret = ____request_module(wait, NULL, fmt, args);
65710 + va_end(args);
65711 +
65712 + return ret;
65713 +}
65714 +
65715 EXPORT_SYMBOL(__request_module);
65716 #endif /* CONFIG_MODULES */
65717
65718 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
65719 *
65720 * Thus the __user pointer cast is valid here.
65721 */
65722 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
65723 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65724
65725 /*
65726 * If ret is 0, either ____call_usermodehelper failed and the
65727 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65728 index bc90b87..43c7d8c 100644
65729 --- a/kernel/kprobes.c
65730 +++ b/kernel/kprobes.c
65731 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65732 * kernel image and loaded module images reside. This is required
65733 * so x86_64 can correctly handle the %rip-relative fixups.
65734 */
65735 - kip->insns = module_alloc(PAGE_SIZE);
65736 + kip->insns = module_alloc_exec(PAGE_SIZE);
65737 if (!kip->insns) {
65738 kfree(kip);
65739 return NULL;
65740 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65741 */
65742 if (!list_is_singular(&kip->list)) {
65743 list_del(&kip->list);
65744 - module_free(NULL, kip->insns);
65745 + module_free_exec(NULL, kip->insns);
65746 kfree(kip);
65747 }
65748 return 1;
65749 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65750 {
65751 int i, err = 0;
65752 unsigned long offset = 0, size = 0;
65753 - char *modname, namebuf[128];
65754 + char *modname, namebuf[KSYM_NAME_LEN];
65755 const char *symbol_name;
65756 void *addr;
65757 struct kprobe_blackpoint *kb;
65758 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
65759 const char *sym = NULL;
65760 unsigned int i = *(loff_t *) v;
65761 unsigned long offset = 0;
65762 - char *modname, namebuf[128];
65763 + char *modname, namebuf[KSYM_NAME_LEN];
65764
65765 head = &kprobe_table[i];
65766 preempt_disable();
65767 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
65768 index b2e08c9..01d8049 100644
65769 --- a/kernel/lockdep.c
65770 +++ b/kernel/lockdep.c
65771 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
65772 end = (unsigned long) &_end,
65773 addr = (unsigned long) obj;
65774
65775 +#ifdef CONFIG_PAX_KERNEXEC
65776 + start = ktla_ktva(start);
65777 +#endif
65778 +
65779 /*
65780 * static variable?
65781 */
65782 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
65783 if (!static_obj(lock->key)) {
65784 debug_locks_off();
65785 printk("INFO: trying to register non-static key.\n");
65786 + printk("lock:%pS key:%pS.\n", lock, lock->key);
65787 printk("the code is fine but needs lockdep annotation.\n");
65788 printk("turning off the locking correctness validator.\n");
65789 dump_stack();
65790 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
65791 if (!class)
65792 return 0;
65793 }
65794 - atomic_inc((atomic_t *)&class->ops);
65795 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
65796 if (very_verbose(class)) {
65797 printk("\nacquire class [%p] %s", class->key, class->name);
65798 if (class->name_version > 1)
65799 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
65800 index 91c32a0..b2c71c5 100644
65801 --- a/kernel/lockdep_proc.c
65802 +++ b/kernel/lockdep_proc.c
65803 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
65804
65805 static void print_name(struct seq_file *m, struct lock_class *class)
65806 {
65807 - char str[128];
65808 + char str[KSYM_NAME_LEN];
65809 const char *name = class->name;
65810
65811 if (!name) {
65812 diff --git a/kernel/module.c b/kernel/module.c
65813 index 178333c..04e3408 100644
65814 --- a/kernel/module.c
65815 +++ b/kernel/module.c
65816 @@ -58,6 +58,7 @@
65817 #include <linux/jump_label.h>
65818 #include <linux/pfn.h>
65819 #include <linux/bsearch.h>
65820 +#include <linux/grsecurity.h>
65821
65822 #define CREATE_TRACE_POINTS
65823 #include <trace/events/module.h>
65824 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
65825
65826 /* Bounds of module allocation, for speeding __module_address.
65827 * Protected by module_mutex. */
65828 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
65829 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
65830 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
65831
65832 int register_module_notifier(struct notifier_block * nb)
65833 {
65834 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65835 return true;
65836
65837 list_for_each_entry_rcu(mod, &modules, list) {
65838 - struct symsearch arr[] = {
65839 + struct symsearch modarr[] = {
65840 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
65841 NOT_GPL_ONLY, false },
65842 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
65843 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65844 #endif
65845 };
65846
65847 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
65848 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
65849 return true;
65850 }
65851 return false;
65852 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
65853 static int percpu_modalloc(struct module *mod,
65854 unsigned long size, unsigned long align)
65855 {
65856 - if (align > PAGE_SIZE) {
65857 + if (align-1 >= PAGE_SIZE) {
65858 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
65859 mod->name, align, PAGE_SIZE);
65860 align = PAGE_SIZE;
65861 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
65862 */
65863 #ifdef CONFIG_SYSFS
65864
65865 -#ifdef CONFIG_KALLSYMS
65866 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65867 static inline bool sect_empty(const Elf_Shdr *sect)
65868 {
65869 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
65870 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
65871
65872 static void unset_module_core_ro_nx(struct module *mod)
65873 {
65874 - set_page_attributes(mod->module_core + mod->core_text_size,
65875 - mod->module_core + mod->core_size,
65876 + set_page_attributes(mod->module_core_rw,
65877 + mod->module_core_rw + mod->core_size_rw,
65878 set_memory_x);
65879 - set_page_attributes(mod->module_core,
65880 - mod->module_core + mod->core_ro_size,
65881 + set_page_attributes(mod->module_core_rx,
65882 + mod->module_core_rx + mod->core_size_rx,
65883 set_memory_rw);
65884 }
65885
65886 static void unset_module_init_ro_nx(struct module *mod)
65887 {
65888 - set_page_attributes(mod->module_init + mod->init_text_size,
65889 - mod->module_init + mod->init_size,
65890 + set_page_attributes(mod->module_init_rw,
65891 + mod->module_init_rw + mod->init_size_rw,
65892 set_memory_x);
65893 - set_page_attributes(mod->module_init,
65894 - mod->module_init + mod->init_ro_size,
65895 + set_page_attributes(mod->module_init_rx,
65896 + mod->module_init_rx + mod->init_size_rx,
65897 set_memory_rw);
65898 }
65899
65900 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
65901
65902 mutex_lock(&module_mutex);
65903 list_for_each_entry_rcu(mod, &modules, list) {
65904 - if ((mod->module_core) && (mod->core_text_size)) {
65905 - set_page_attributes(mod->module_core,
65906 - mod->module_core + mod->core_text_size,
65907 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
65908 + set_page_attributes(mod->module_core_rx,
65909 + mod->module_core_rx + mod->core_size_rx,
65910 set_memory_rw);
65911 }
65912 - if ((mod->module_init) && (mod->init_text_size)) {
65913 - set_page_attributes(mod->module_init,
65914 - mod->module_init + mod->init_text_size,
65915 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
65916 + set_page_attributes(mod->module_init_rx,
65917 + mod->module_init_rx + mod->init_size_rx,
65918 set_memory_rw);
65919 }
65920 }
65921 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
65922
65923 mutex_lock(&module_mutex);
65924 list_for_each_entry_rcu(mod, &modules, list) {
65925 - if ((mod->module_core) && (mod->core_text_size)) {
65926 - set_page_attributes(mod->module_core,
65927 - mod->module_core + mod->core_text_size,
65928 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
65929 + set_page_attributes(mod->module_core_rx,
65930 + mod->module_core_rx + mod->core_size_rx,
65931 set_memory_ro);
65932 }
65933 - if ((mod->module_init) && (mod->init_text_size)) {
65934 - set_page_attributes(mod->module_init,
65935 - mod->module_init + mod->init_text_size,
65936 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
65937 + set_page_attributes(mod->module_init_rx,
65938 + mod->module_init_rx + mod->init_size_rx,
65939 set_memory_ro);
65940 }
65941 }
65942 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
65943
65944 /* This may be NULL, but that's OK */
65945 unset_module_init_ro_nx(mod);
65946 - module_free(mod, mod->module_init);
65947 + module_free(mod, mod->module_init_rw);
65948 + module_free_exec(mod, mod->module_init_rx);
65949 kfree(mod->args);
65950 percpu_modfree(mod);
65951
65952 /* Free lock-classes: */
65953 - lockdep_free_key_range(mod->module_core, mod->core_size);
65954 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
65955 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
65956
65957 /* Finally, free the core (containing the module structure) */
65958 unset_module_core_ro_nx(mod);
65959 - module_free(mod, mod->module_core);
65960 + module_free_exec(mod, mod->module_core_rx);
65961 + module_free(mod, mod->module_core_rw);
65962
65963 #ifdef CONFIG_MPU
65964 update_protections(current->mm);
65965 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65966 unsigned int i;
65967 int ret = 0;
65968 const struct kernel_symbol *ksym;
65969 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65970 + int is_fs_load = 0;
65971 + int register_filesystem_found = 0;
65972 + char *p;
65973 +
65974 + p = strstr(mod->args, "grsec_modharden_fs");
65975 + if (p) {
65976 + char *endptr = p + strlen("grsec_modharden_fs");
65977 + /* copy \0 as well */
65978 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
65979 + is_fs_load = 1;
65980 + }
65981 +#endif
65982
65983 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
65984 const char *name = info->strtab + sym[i].st_name;
65985
65986 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65987 + /* it's a real shame this will never get ripped and copied
65988 + upstream! ;(
65989 + */
65990 + if (is_fs_load && !strcmp(name, "register_filesystem"))
65991 + register_filesystem_found = 1;
65992 +#endif
65993 +
65994 switch (sym[i].st_shndx) {
65995 case SHN_COMMON:
65996 /* We compiled with -fno-common. These are not
65997 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
65998 ksym = resolve_symbol_wait(mod, info, name);
65999 /* Ok if resolved. */
66000 if (ksym && !IS_ERR(ksym)) {
66001 + pax_open_kernel();
66002 sym[i].st_value = ksym->value;
66003 + pax_close_kernel();
66004 break;
66005 }
66006
66007 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66008 secbase = (unsigned long)mod_percpu(mod);
66009 else
66010 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66011 + pax_open_kernel();
66012 sym[i].st_value += secbase;
66013 + pax_close_kernel();
66014 break;
66015 }
66016 }
66017
66018 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66019 + if (is_fs_load && !register_filesystem_found) {
66020 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66021 + ret = -EPERM;
66022 + }
66023 +#endif
66024 +
66025 return ret;
66026 }
66027
66028 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66029 || s->sh_entsize != ~0UL
66030 || strstarts(sname, ".init"))
66031 continue;
66032 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66033 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66034 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66035 + else
66036 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66037 DEBUGP("\t%s\n", name);
66038 }
66039 - switch (m) {
66040 - case 0: /* executable */
66041 - mod->core_size = debug_align(mod->core_size);
66042 - mod->core_text_size = mod->core_size;
66043 - break;
66044 - case 1: /* RO: text and ro-data */
66045 - mod->core_size = debug_align(mod->core_size);
66046 - mod->core_ro_size = mod->core_size;
66047 - break;
66048 - case 3: /* whole core */
66049 - mod->core_size = debug_align(mod->core_size);
66050 - break;
66051 - }
66052 }
66053
66054 DEBUGP("Init section allocation order:\n");
66055 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66056 || s->sh_entsize != ~0UL
66057 || !strstarts(sname, ".init"))
66058 continue;
66059 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66060 - | INIT_OFFSET_MASK);
66061 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66062 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66063 + else
66064 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66065 + s->sh_entsize |= INIT_OFFSET_MASK;
66066 DEBUGP("\t%s\n", sname);
66067 }
66068 - switch (m) {
66069 - case 0: /* executable */
66070 - mod->init_size = debug_align(mod->init_size);
66071 - mod->init_text_size = mod->init_size;
66072 - break;
66073 - case 1: /* RO: text and ro-data */
66074 - mod->init_size = debug_align(mod->init_size);
66075 - mod->init_ro_size = mod->init_size;
66076 - break;
66077 - case 3: /* whole init */
66078 - mod->init_size = debug_align(mod->init_size);
66079 - break;
66080 - }
66081 }
66082 }
66083
66084 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66085
66086 /* Put symbol section at end of init part of module. */
66087 symsect->sh_flags |= SHF_ALLOC;
66088 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66089 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66090 info->index.sym) | INIT_OFFSET_MASK;
66091 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
66092
66093 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66094 }
66095
66096 /* Append room for core symbols at end of core part. */
66097 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66098 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66099 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66100 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66101
66102 /* Put string table section at end of init part of module. */
66103 strsect->sh_flags |= SHF_ALLOC;
66104 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66105 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66106 info->index.str) | INIT_OFFSET_MASK;
66107 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
66108
66109 /* Append room for core symbols' strings at end of core part. */
66110 - info->stroffs = mod->core_size;
66111 + info->stroffs = mod->core_size_rx;
66112 __set_bit(0, info->strmap);
66113 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
66114 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
66115 }
66116
66117 static void add_kallsyms(struct module *mod, const struct load_info *info)
66118 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66119 /* Make sure we get permanent strtab: don't use info->strtab. */
66120 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66121
66122 + pax_open_kernel();
66123 +
66124 /* Set types up while we still have access to sections. */
66125 for (i = 0; i < mod->num_symtab; i++)
66126 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66127
66128 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66129 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66130 src = mod->symtab;
66131 *dst = *src;
66132 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
66133 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66134 }
66135 mod->core_num_syms = ndst;
66136
66137 - mod->core_strtab = s = mod->module_core + info->stroffs;
66138 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66139 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
66140 if (test_bit(i, info->strmap))
66141 *++s = mod->strtab[i];
66142 +
66143 + pax_close_kernel();
66144 }
66145 #else
66146 static inline void layout_symtab(struct module *mod, struct load_info *info)
66147 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
66148 return size == 0 ? NULL : vmalloc_exec(size);
66149 }
66150
66151 -static void *module_alloc_update_bounds(unsigned long size)
66152 +static void *module_alloc_update_bounds_rw(unsigned long size)
66153 {
66154 void *ret = module_alloc(size);
66155
66156 if (ret) {
66157 mutex_lock(&module_mutex);
66158 /* Update module bounds. */
66159 - if ((unsigned long)ret < module_addr_min)
66160 - module_addr_min = (unsigned long)ret;
66161 - if ((unsigned long)ret + size > module_addr_max)
66162 - module_addr_max = (unsigned long)ret + size;
66163 + if ((unsigned long)ret < module_addr_min_rw)
66164 + module_addr_min_rw = (unsigned long)ret;
66165 + if ((unsigned long)ret + size > module_addr_max_rw)
66166 + module_addr_max_rw = (unsigned long)ret + size;
66167 + mutex_unlock(&module_mutex);
66168 + }
66169 + return ret;
66170 +}
66171 +
66172 +static void *module_alloc_update_bounds_rx(unsigned long size)
66173 +{
66174 + void *ret = module_alloc_exec(size);
66175 +
66176 + if (ret) {
66177 + mutex_lock(&module_mutex);
66178 + /* Update module bounds. */
66179 + if ((unsigned long)ret < module_addr_min_rx)
66180 + module_addr_min_rx = (unsigned long)ret;
66181 + if ((unsigned long)ret + size > module_addr_max_rx)
66182 + module_addr_max_rx = (unsigned long)ret + size;
66183 mutex_unlock(&module_mutex);
66184 }
66185 return ret;
66186 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
66187 static int check_modinfo(struct module *mod, struct load_info *info)
66188 {
66189 const char *modmagic = get_modinfo(info, "vermagic");
66190 + const char *license = get_modinfo(info, "license");
66191 int err;
66192
66193 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66194 + if (!license || !license_is_gpl_compatible(license))
66195 + return -ENOEXEC;
66196 +#endif
66197 +
66198 /* This is allowed: modprobe --force will invalidate it. */
66199 if (!modmagic) {
66200 err = try_to_force_load(mod, "bad vermagic");
66201 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66202 }
66203
66204 /* Set up license info based on the info section */
66205 - set_license(mod, get_modinfo(info, "license"));
66206 + set_license(mod, license);
66207
66208 return 0;
66209 }
66210 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
66211 void *ptr;
66212
66213 /* Do the allocs. */
66214 - ptr = module_alloc_update_bounds(mod->core_size);
66215 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66216 /*
66217 * The pointer to this block is stored in the module structure
66218 * which is inside the block. Just mark it as not being a
66219 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
66220 if (!ptr)
66221 return -ENOMEM;
66222
66223 - memset(ptr, 0, mod->core_size);
66224 - mod->module_core = ptr;
66225 + memset(ptr, 0, mod->core_size_rw);
66226 + mod->module_core_rw = ptr;
66227
66228 - ptr = module_alloc_update_bounds(mod->init_size);
66229 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66230 /*
66231 * The pointer to this block is stored in the module structure
66232 * which is inside the block. This block doesn't need to be
66233 * scanned as it contains data and code that will be freed
66234 * after the module is initialized.
66235 */
66236 - kmemleak_ignore(ptr);
66237 - if (!ptr && mod->init_size) {
66238 - module_free(mod, mod->module_core);
66239 + kmemleak_not_leak(ptr);
66240 + if (!ptr && mod->init_size_rw) {
66241 + module_free(mod, mod->module_core_rw);
66242 return -ENOMEM;
66243 }
66244 - memset(ptr, 0, mod->init_size);
66245 - mod->module_init = ptr;
66246 + memset(ptr, 0, mod->init_size_rw);
66247 + mod->module_init_rw = ptr;
66248 +
66249 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66250 + kmemleak_not_leak(ptr);
66251 + if (!ptr) {
66252 + module_free(mod, mod->module_init_rw);
66253 + module_free(mod, mod->module_core_rw);
66254 + return -ENOMEM;
66255 + }
66256 +
66257 + pax_open_kernel();
66258 + memset(ptr, 0, mod->core_size_rx);
66259 + pax_close_kernel();
66260 + mod->module_core_rx = ptr;
66261 +
66262 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66263 + kmemleak_not_leak(ptr);
66264 + if (!ptr && mod->init_size_rx) {
66265 + module_free_exec(mod, mod->module_core_rx);
66266 + module_free(mod, mod->module_init_rw);
66267 + module_free(mod, mod->module_core_rw);
66268 + return -ENOMEM;
66269 + }
66270 +
66271 + pax_open_kernel();
66272 + memset(ptr, 0, mod->init_size_rx);
66273 + pax_close_kernel();
66274 + mod->module_init_rx = ptr;
66275
66276 /* Transfer each section which specifies SHF_ALLOC */
66277 DEBUGP("final section addresses:\n");
66278 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
66279 if (!(shdr->sh_flags & SHF_ALLOC))
66280 continue;
66281
66282 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66283 - dest = mod->module_init
66284 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66285 - else
66286 - dest = mod->module_core + shdr->sh_entsize;
66287 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66288 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66289 + dest = mod->module_init_rw
66290 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66291 + else
66292 + dest = mod->module_init_rx
66293 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66294 + } else {
66295 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66296 + dest = mod->module_core_rw + shdr->sh_entsize;
66297 + else
66298 + dest = mod->module_core_rx + shdr->sh_entsize;
66299 + }
66300 +
66301 + if (shdr->sh_type != SHT_NOBITS) {
66302 +
66303 +#ifdef CONFIG_PAX_KERNEXEC
66304 +#ifdef CONFIG_X86_64
66305 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66306 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66307 +#endif
66308 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66309 + pax_open_kernel();
66310 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66311 + pax_close_kernel();
66312 + } else
66313 +#endif
66314
66315 - if (shdr->sh_type != SHT_NOBITS)
66316 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66317 + }
66318 /* Update sh_addr to point to copy in image. */
66319 - shdr->sh_addr = (unsigned long)dest;
66320 +
66321 +#ifdef CONFIG_PAX_KERNEXEC
66322 + if (shdr->sh_flags & SHF_EXECINSTR)
66323 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66324 + else
66325 +#endif
66326 +
66327 + shdr->sh_addr = (unsigned long)dest;
66328 DEBUGP("\t0x%lx %s\n",
66329 shdr->sh_addr, info->secstrings + shdr->sh_name);
66330 }
66331 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
66332 * Do it before processing of module parameters, so the module
66333 * can provide parameter accessor functions of its own.
66334 */
66335 - if (mod->module_init)
66336 - flush_icache_range((unsigned long)mod->module_init,
66337 - (unsigned long)mod->module_init
66338 - + mod->init_size);
66339 - flush_icache_range((unsigned long)mod->module_core,
66340 - (unsigned long)mod->module_core + mod->core_size);
66341 + if (mod->module_init_rx)
66342 + flush_icache_range((unsigned long)mod->module_init_rx,
66343 + (unsigned long)mod->module_init_rx
66344 + + mod->init_size_rx);
66345 + flush_icache_range((unsigned long)mod->module_core_rx,
66346 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66347
66348 set_fs(old_fs);
66349 }
66350 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
66351 {
66352 kfree(info->strmap);
66353 percpu_modfree(mod);
66354 - module_free(mod, mod->module_init);
66355 - module_free(mod, mod->module_core);
66356 + module_free_exec(mod, mod->module_init_rx);
66357 + module_free_exec(mod, mod->module_core_rx);
66358 + module_free(mod, mod->module_init_rw);
66359 + module_free(mod, mod->module_core_rw);
66360 }
66361
66362 int __weak module_finalize(const Elf_Ehdr *hdr,
66363 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
66364 if (err)
66365 goto free_unload;
66366
66367 + /* Now copy in args */
66368 + mod->args = strndup_user(uargs, ~0UL >> 1);
66369 + if (IS_ERR(mod->args)) {
66370 + err = PTR_ERR(mod->args);
66371 + goto free_unload;
66372 + }
66373 +
66374 /* Set up MODINFO_ATTR fields */
66375 setup_modinfo(mod, &info);
66376
66377 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66378 + {
66379 + char *p, *p2;
66380 +
66381 + if (strstr(mod->args, "grsec_modharden_netdev")) {
66382 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66383 + err = -EPERM;
66384 + goto free_modinfo;
66385 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66386 + p += strlen("grsec_modharden_normal");
66387 + p2 = strstr(p, "_");
66388 + if (p2) {
66389 + *p2 = '\0';
66390 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66391 + *p2 = '_';
66392 + }
66393 + err = -EPERM;
66394 + goto free_modinfo;
66395 + }
66396 + }
66397 +#endif
66398 +
66399 /* Fix up syms, so that st_value is a pointer to location. */
66400 err = simplify_symbols(mod, &info);
66401 if (err < 0)
66402 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
66403
66404 flush_module_icache(mod);
66405
66406 - /* Now copy in args */
66407 - mod->args = strndup_user(uargs, ~0UL >> 1);
66408 - if (IS_ERR(mod->args)) {
66409 - err = PTR_ERR(mod->args);
66410 - goto free_arch_cleanup;
66411 - }
66412 -
66413 /* Mark state as coming so strong_try_module_get() ignores us. */
66414 mod->state = MODULE_STATE_COMING;
66415
66416 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
66417 unlock:
66418 mutex_unlock(&module_mutex);
66419 synchronize_sched();
66420 - kfree(mod->args);
66421 - free_arch_cleanup:
66422 module_arch_cleanup(mod);
66423 free_modinfo:
66424 free_modinfo(mod);
66425 + kfree(mod->args);
66426 free_unload:
66427 module_unload_free(mod);
66428 free_module:
66429 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66430 MODULE_STATE_COMING, mod);
66431
66432 /* Set RO and NX regions for core */
66433 - set_section_ro_nx(mod->module_core,
66434 - mod->core_text_size,
66435 - mod->core_ro_size,
66436 - mod->core_size);
66437 + set_section_ro_nx(mod->module_core_rx,
66438 + mod->core_size_rx,
66439 + mod->core_size_rx,
66440 + mod->core_size_rx);
66441
66442 /* Set RO and NX regions for init */
66443 - set_section_ro_nx(mod->module_init,
66444 - mod->init_text_size,
66445 - mod->init_ro_size,
66446 - mod->init_size);
66447 + set_section_ro_nx(mod->module_init_rx,
66448 + mod->init_size_rx,
66449 + mod->init_size_rx,
66450 + mod->init_size_rx);
66451
66452 do_mod_ctors(mod);
66453 /* Start the module */
66454 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66455 mod->strtab = mod->core_strtab;
66456 #endif
66457 unset_module_init_ro_nx(mod);
66458 - module_free(mod, mod->module_init);
66459 - mod->module_init = NULL;
66460 - mod->init_size = 0;
66461 - mod->init_ro_size = 0;
66462 - mod->init_text_size = 0;
66463 + module_free(mod, mod->module_init_rw);
66464 + module_free_exec(mod, mod->module_init_rx);
66465 + mod->module_init_rw = NULL;
66466 + mod->module_init_rx = NULL;
66467 + mod->init_size_rw = 0;
66468 + mod->init_size_rx = 0;
66469 mutex_unlock(&module_mutex);
66470
66471 return 0;
66472 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
66473 unsigned long nextval;
66474
66475 /* At worse, next value is at end of module */
66476 - if (within_module_init(addr, mod))
66477 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
66478 + if (within_module_init_rx(addr, mod))
66479 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66480 + else if (within_module_init_rw(addr, mod))
66481 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66482 + else if (within_module_core_rx(addr, mod))
66483 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66484 + else if (within_module_core_rw(addr, mod))
66485 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66486 else
66487 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
66488 + return NULL;
66489
66490 /* Scan for closest preceding symbol, and next symbol. (ELF
66491 starts real symbols at 1). */
66492 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
66493 char buf[8];
66494
66495 seq_printf(m, "%s %u",
66496 - mod->name, mod->init_size + mod->core_size);
66497 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66498 print_unload_info(m, mod);
66499
66500 /* Informative for users. */
66501 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
66502 mod->state == MODULE_STATE_COMING ? "Loading":
66503 "Live");
66504 /* Used by oprofile and other similar tools. */
66505 - seq_printf(m, " 0x%pK", mod->module_core);
66506 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66507
66508 /* Taints info */
66509 if (mod->taints)
66510 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
66511
66512 static int __init proc_modules_init(void)
66513 {
66514 +#ifndef CONFIG_GRKERNSEC_HIDESYM
66515 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66516 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66517 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66518 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66519 +#else
66520 proc_create("modules", 0, NULL, &proc_modules_operations);
66521 +#endif
66522 +#else
66523 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66524 +#endif
66525 return 0;
66526 }
66527 module_init(proc_modules_init);
66528 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
66529 {
66530 struct module *mod;
66531
66532 - if (addr < module_addr_min || addr > module_addr_max)
66533 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66534 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
66535 return NULL;
66536
66537 list_for_each_entry_rcu(mod, &modules, list)
66538 - if (within_module_core(addr, mod)
66539 - || within_module_init(addr, mod))
66540 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
66541 return mod;
66542 return NULL;
66543 }
66544 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
66545 */
66546 struct module *__module_text_address(unsigned long addr)
66547 {
66548 - struct module *mod = __module_address(addr);
66549 + struct module *mod;
66550 +
66551 +#ifdef CONFIG_X86_32
66552 + addr = ktla_ktva(addr);
66553 +#endif
66554 +
66555 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66556 + return NULL;
66557 +
66558 + mod = __module_address(addr);
66559 +
66560 if (mod) {
66561 /* Make sure it's within the text section. */
66562 - if (!within(addr, mod->module_init, mod->init_text_size)
66563 - && !within(addr, mod->module_core, mod->core_text_size))
66564 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66565 mod = NULL;
66566 }
66567 return mod;
66568 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66569 index 7e3443f..b2a1e6b 100644
66570 --- a/kernel/mutex-debug.c
66571 +++ b/kernel/mutex-debug.c
66572 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66573 }
66574
66575 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66576 - struct thread_info *ti)
66577 + struct task_struct *task)
66578 {
66579 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66580
66581 /* Mark the current thread as blocked on the lock: */
66582 - ti->task->blocked_on = waiter;
66583 + task->blocked_on = waiter;
66584 }
66585
66586 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66587 - struct thread_info *ti)
66588 + struct task_struct *task)
66589 {
66590 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66591 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66592 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66593 - ti->task->blocked_on = NULL;
66594 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
66595 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66596 + task->blocked_on = NULL;
66597
66598 list_del_init(&waiter->list);
66599 waiter->task = NULL;
66600 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66601 index 0799fd3..d06ae3b 100644
66602 --- a/kernel/mutex-debug.h
66603 +++ b/kernel/mutex-debug.h
66604 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66605 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66606 extern void debug_mutex_add_waiter(struct mutex *lock,
66607 struct mutex_waiter *waiter,
66608 - struct thread_info *ti);
66609 + struct task_struct *task);
66610 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66611 - struct thread_info *ti);
66612 + struct task_struct *task);
66613 extern void debug_mutex_unlock(struct mutex *lock);
66614 extern void debug_mutex_init(struct mutex *lock, const char *name,
66615 struct lock_class_key *key);
66616 diff --git a/kernel/mutex.c b/kernel/mutex.c
66617 index 89096dd..f91ebc5 100644
66618 --- a/kernel/mutex.c
66619 +++ b/kernel/mutex.c
66620 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66621 spin_lock_mutex(&lock->wait_lock, flags);
66622
66623 debug_mutex_lock_common(lock, &waiter);
66624 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66625 + debug_mutex_add_waiter(lock, &waiter, task);
66626
66627 /* add waiting tasks to the end of the waitqueue (FIFO): */
66628 list_add_tail(&waiter.list, &lock->wait_list);
66629 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66630 * TASK_UNINTERRUPTIBLE case.)
66631 */
66632 if (unlikely(signal_pending_state(state, task))) {
66633 - mutex_remove_waiter(lock, &waiter,
66634 - task_thread_info(task));
66635 + mutex_remove_waiter(lock, &waiter, task);
66636 mutex_release(&lock->dep_map, 1, ip);
66637 spin_unlock_mutex(&lock->wait_lock, flags);
66638
66639 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66640 done:
66641 lock_acquired(&lock->dep_map, ip);
66642 /* got the lock - rejoice! */
66643 - mutex_remove_waiter(lock, &waiter, current_thread_info());
66644 + mutex_remove_waiter(lock, &waiter, task);
66645 mutex_set_owner(lock);
66646
66647 /* set it to 0 if there are no waiters left: */
66648 diff --git a/kernel/padata.c b/kernel/padata.c
66649 index b452599..5d68f4e 100644
66650 --- a/kernel/padata.c
66651 +++ b/kernel/padata.c
66652 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
66653 padata->pd = pd;
66654 padata->cb_cpu = cb_cpu;
66655
66656 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
66657 - atomic_set(&pd->seq_nr, -1);
66658 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
66659 + atomic_set_unchecked(&pd->seq_nr, -1);
66660
66661 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
66662 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
66663
66664 target_cpu = padata_cpu_hash(padata);
66665 queue = per_cpu_ptr(pd->pqueue, target_cpu);
66666 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
66667 padata_init_pqueues(pd);
66668 padata_init_squeues(pd);
66669 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
66670 - atomic_set(&pd->seq_nr, -1);
66671 + atomic_set_unchecked(&pd->seq_nr, -1);
66672 atomic_set(&pd->reorder_objects, 0);
66673 atomic_set(&pd->refcnt, 0);
66674 pd->pinst = pinst;
66675 diff --git a/kernel/panic.c b/kernel/panic.c
66676 index 3458469..342c500 100644
66677 --- a/kernel/panic.c
66678 +++ b/kernel/panic.c
66679 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
66680 va_end(args);
66681 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
66682 #ifdef CONFIG_DEBUG_BUGVERBOSE
66683 - dump_stack();
66684 + /*
66685 + * Avoid nested stack-dumping if a panic occurs during oops processing
66686 + */
66687 + if (!oops_in_progress)
66688 + dump_stack();
66689 #endif
66690
66691 /*
66692 @@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66693 const char *board;
66694
66695 printk(KERN_WARNING "------------[ cut here ]------------\n");
66696 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66697 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66698 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66699 if (board)
66700 printk(KERN_WARNING "Hardware name: %s\n", board);
66701 @@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66702 */
66703 void __stack_chk_fail(void)
66704 {
66705 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
66706 + dump_stack();
66707 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66708 __builtin_return_address(0));
66709 }
66710 EXPORT_SYMBOL(__stack_chk_fail);
66711 diff --git a/kernel/pid.c b/kernel/pid.c
66712 index fa5f722..0c93e57 100644
66713 --- a/kernel/pid.c
66714 +++ b/kernel/pid.c
66715 @@ -33,6 +33,7 @@
66716 #include <linux/rculist.h>
66717 #include <linux/bootmem.h>
66718 #include <linux/hash.h>
66719 +#include <linux/security.h>
66720 #include <linux/pid_namespace.h>
66721 #include <linux/init_task.h>
66722 #include <linux/syscalls.h>
66723 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66724
66725 int pid_max = PID_MAX_DEFAULT;
66726
66727 -#define RESERVED_PIDS 300
66728 +#define RESERVED_PIDS 500
66729
66730 int pid_max_min = RESERVED_PIDS + 1;
66731 int pid_max_max = PID_MAX_LIMIT;
66732 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
66733 */
66734 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66735 {
66736 + struct task_struct *task;
66737 +
66738 rcu_lockdep_assert(rcu_read_lock_held(),
66739 "find_task_by_pid_ns() needs rcu_read_lock()"
66740 " protection");
66741 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66742 +
66743 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66744 +
66745 + if (gr_pid_is_chrooted(task))
66746 + return NULL;
66747 +
66748 + return task;
66749 }
66750
66751 struct task_struct *find_task_by_vpid(pid_t vnr)
66752 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66753 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66754 }
66755
66756 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66757 +{
66758 + rcu_lockdep_assert(rcu_read_lock_held(),
66759 + "find_task_by_pid_ns() needs rcu_read_lock()"
66760 + " protection");
66761 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66762 +}
66763 +
66764 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66765 {
66766 struct pid *pid;
66767 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66768 index e7cb76d..75eceb3 100644
66769 --- a/kernel/posix-cpu-timers.c
66770 +++ b/kernel/posix-cpu-timers.c
66771 @@ -6,6 +6,7 @@
66772 #include <linux/posix-timers.h>
66773 #include <linux/errno.h>
66774 #include <linux/math64.h>
66775 +#include <linux/security.h>
66776 #include <asm/uaccess.h>
66777 #include <linux/kernel_stat.h>
66778 #include <trace/events/timer.h>
66779 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
66780
66781 static __init int init_posix_cpu_timers(void)
66782 {
66783 - struct k_clock process = {
66784 + static struct k_clock process = {
66785 .clock_getres = process_cpu_clock_getres,
66786 .clock_get = process_cpu_clock_get,
66787 .timer_create = process_cpu_timer_create,
66788 .nsleep = process_cpu_nsleep,
66789 .nsleep_restart = process_cpu_nsleep_restart,
66790 };
66791 - struct k_clock thread = {
66792 + static struct k_clock thread = {
66793 .clock_getres = thread_cpu_clock_getres,
66794 .clock_get = thread_cpu_clock_get,
66795 .timer_create = thread_cpu_timer_create,
66796 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
66797 index 69185ae..cc2847a 100644
66798 --- a/kernel/posix-timers.c
66799 +++ b/kernel/posix-timers.c
66800 @@ -43,6 +43,7 @@
66801 #include <linux/idr.h>
66802 #include <linux/posix-clock.h>
66803 #include <linux/posix-timers.h>
66804 +#include <linux/grsecurity.h>
66805 #include <linux/syscalls.h>
66806 #include <linux/wait.h>
66807 #include <linux/workqueue.h>
66808 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
66809 * which we beg off on and pass to do_sys_settimeofday().
66810 */
66811
66812 -static struct k_clock posix_clocks[MAX_CLOCKS];
66813 +static struct k_clock *posix_clocks[MAX_CLOCKS];
66814
66815 /*
66816 * These ones are defined below.
66817 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66818 */
66819 static __init int init_posix_timers(void)
66820 {
66821 - struct k_clock clock_realtime = {
66822 + static struct k_clock clock_realtime = {
66823 .clock_getres = hrtimer_get_res,
66824 .clock_get = posix_clock_realtime_get,
66825 .clock_set = posix_clock_realtime_set,
66826 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66827 .timer_get = common_timer_get,
66828 .timer_del = common_timer_del,
66829 };
66830 - struct k_clock clock_monotonic = {
66831 + static struct k_clock clock_monotonic = {
66832 .clock_getres = hrtimer_get_res,
66833 .clock_get = posix_ktime_get_ts,
66834 .nsleep = common_nsleep,
66835 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66836 .timer_get = common_timer_get,
66837 .timer_del = common_timer_del,
66838 };
66839 - struct k_clock clock_monotonic_raw = {
66840 + static struct k_clock clock_monotonic_raw = {
66841 .clock_getres = hrtimer_get_res,
66842 .clock_get = posix_get_monotonic_raw,
66843 };
66844 - struct k_clock clock_realtime_coarse = {
66845 + static struct k_clock clock_realtime_coarse = {
66846 .clock_getres = posix_get_coarse_res,
66847 .clock_get = posix_get_realtime_coarse,
66848 };
66849 - struct k_clock clock_monotonic_coarse = {
66850 + static struct k_clock clock_monotonic_coarse = {
66851 .clock_getres = posix_get_coarse_res,
66852 .clock_get = posix_get_monotonic_coarse,
66853 };
66854 - struct k_clock clock_boottime = {
66855 + static struct k_clock clock_boottime = {
66856 .clock_getres = hrtimer_get_res,
66857 .clock_get = posix_get_boottime,
66858 .nsleep = common_nsleep,
66859 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
66860 return;
66861 }
66862
66863 - posix_clocks[clock_id] = *new_clock;
66864 + posix_clocks[clock_id] = new_clock;
66865 }
66866 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66867
66868 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66869 return (id & CLOCKFD_MASK) == CLOCKFD ?
66870 &clock_posix_dynamic : &clock_posix_cpu;
66871
66872 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
66873 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
66874 return NULL;
66875 - return &posix_clocks[id];
66876 + return posix_clocks[id];
66877 }
66878
66879 static int common_timer_create(struct k_itimer *new_timer)
66880 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
66881 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
66882 return -EFAULT;
66883
66884 + /* only the CLOCK_REALTIME clock can be set, all other clocks
66885 + have their clock_set fptr set to a nosettime dummy function
66886 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
66887 + call common_clock_set, which calls do_sys_settimeofday, which
66888 + we hook
66889 + */
66890 +
66891 return kc->clock_set(which_clock, &new_tp);
66892 }
66893
66894 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
66895 index d523593..68197a4 100644
66896 --- a/kernel/power/poweroff.c
66897 +++ b/kernel/power/poweroff.c
66898 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
66899 .enable_mask = SYSRQ_ENABLE_BOOT,
66900 };
66901
66902 -static int pm_sysrq_init(void)
66903 +static int __init pm_sysrq_init(void)
66904 {
66905 register_sysrq_key('o', &sysrq_poweroff_op);
66906 return 0;
66907 diff --git a/kernel/power/process.c b/kernel/power/process.c
66908 index 3d4b954..11af930 100644
66909 --- a/kernel/power/process.c
66910 +++ b/kernel/power/process.c
66911 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
66912 u64 elapsed_csecs64;
66913 unsigned int elapsed_csecs;
66914 bool wakeup = false;
66915 + bool timedout = false;
66916
66917 do_gettimeofday(&start);
66918
66919 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
66920
66921 while (true) {
66922 todo = 0;
66923 + if (time_after(jiffies, end_time))
66924 + timedout = true;
66925 read_lock(&tasklist_lock);
66926 do_each_thread(g, p) {
66927 if (frozen(p) || !freezable(p))
66928 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
66929 * try_to_stop() after schedule() in ptrace/signal
66930 * stop sees TIF_FREEZE.
66931 */
66932 - if (!task_is_stopped_or_traced(p) &&
66933 - !freezer_should_skip(p))
66934 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
66935 todo++;
66936 + if (timedout) {
66937 + printk(KERN_ERR "Task refusing to freeze:\n");
66938 + sched_show_task(p);
66939 + }
66940 + }
66941 } while_each_thread(g, p);
66942 read_unlock(&tasklist_lock);
66943
66944 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
66945 todo += wq_busy;
66946 }
66947
66948 - if (!todo || time_after(jiffies, end_time))
66949 + if (!todo || timedout)
66950 break;
66951
66952 if (pm_wakeup_pending()) {
66953 diff --git a/kernel/printk.c b/kernel/printk.c
66954 index 7982a0a..2095fdc 100644
66955 --- a/kernel/printk.c
66956 +++ b/kernel/printk.c
66957 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
66958 if (from_file && type != SYSLOG_ACTION_OPEN)
66959 return 0;
66960
66961 +#ifdef CONFIG_GRKERNSEC_DMESG
66962 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
66963 + return -EPERM;
66964 +#endif
66965 +
66966 if (syslog_action_restricted(type)) {
66967 if (capable(CAP_SYSLOG))
66968 return 0;
66969 diff --git a/kernel/profile.c b/kernel/profile.c
66970 index 76b8e77..a2930e8 100644
66971 --- a/kernel/profile.c
66972 +++ b/kernel/profile.c
66973 @@ -39,7 +39,7 @@ struct profile_hit {
66974 /* Oprofile timer tick hook */
66975 static int (*timer_hook)(struct pt_regs *) __read_mostly;
66976
66977 -static atomic_t *prof_buffer;
66978 +static atomic_unchecked_t *prof_buffer;
66979 static unsigned long prof_len, prof_shift;
66980
66981 int prof_on __read_mostly;
66982 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
66983 hits[i].pc = 0;
66984 continue;
66985 }
66986 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
66987 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
66988 hits[i].hits = hits[i].pc = 0;
66989 }
66990 }
66991 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
66992 * Add the current hit(s) and flush the write-queue out
66993 * to the global buffer:
66994 */
66995 - atomic_add(nr_hits, &prof_buffer[pc]);
66996 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
66997 for (i = 0; i < NR_PROFILE_HIT; ++i) {
66998 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
66999 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67000 hits[i].pc = hits[i].hits = 0;
67001 }
67002 out:
67003 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67004 {
67005 unsigned long pc;
67006 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67007 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67008 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67009 }
67010 #endif /* !CONFIG_SMP */
67011
67012 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67013 return -EFAULT;
67014 buf++; p++; count--; read++;
67015 }
67016 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67017 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67018 if (copy_to_user(buf, (void *)pnt, count))
67019 return -EFAULT;
67020 read += count;
67021 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67022 }
67023 #endif
67024 profile_discard_flip_buffers();
67025 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67026 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67027 return count;
67028 }
67029
67030 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67031 index 78ab24a..332c915 100644
67032 --- a/kernel/ptrace.c
67033 +++ b/kernel/ptrace.c
67034 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
67035 return ret;
67036 }
67037
67038 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67039 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
67040 + unsigned int log)
67041 {
67042 const struct cred *cred = current_cred(), *tcred;
67043
67044 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67045 cred->gid == tcred->sgid &&
67046 cred->gid == tcred->gid))
67047 goto ok;
67048 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
67049 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
67050 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
67051 goto ok;
67052 rcu_read_unlock();
67053 return -EPERM;
67054 @@ -207,7 +209,9 @@ ok:
67055 smp_rmb();
67056 if (task->mm)
67057 dumpable = get_dumpable(task->mm);
67058 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
67059 + if (!dumpable &&
67060 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
67061 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
67062 return -EPERM;
67063
67064 return security_ptrace_access_check(task, mode);
67065 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
67066 {
67067 int err;
67068 task_lock(task);
67069 - err = __ptrace_may_access(task, mode);
67070 + err = __ptrace_may_access(task, mode, 0);
67071 + task_unlock(task);
67072 + return !err;
67073 +}
67074 +
67075 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
67076 +{
67077 + return __ptrace_may_access(task, mode, 0);
67078 +}
67079 +
67080 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
67081 +{
67082 + int err;
67083 + task_lock(task);
67084 + err = __ptrace_may_access(task, mode, 1);
67085 task_unlock(task);
67086 return !err;
67087 }
67088 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67089 goto out;
67090
67091 task_lock(task);
67092 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
67093 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
67094 task_unlock(task);
67095 if (retval)
67096 goto unlock_creds;
67097 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67098 task->ptrace = PT_PTRACED;
67099 if (seize)
67100 task->ptrace |= PT_SEIZED;
67101 - if (task_ns_capable(task, CAP_SYS_PTRACE))
67102 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
67103 task->ptrace |= PT_PTRACE_CAP;
67104
67105 __ptrace_link(task, current);
67106 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67107 break;
67108 return -EIO;
67109 }
67110 - if (copy_to_user(dst, buf, retval))
67111 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67112 return -EFAULT;
67113 copied += retval;
67114 src += retval;
67115 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
67116 bool seized = child->ptrace & PT_SEIZED;
67117 int ret = -EIO;
67118 siginfo_t siginfo, *si;
67119 - void __user *datavp = (void __user *) data;
67120 + void __user *datavp = (__force void __user *) data;
67121 unsigned long __user *datalp = datavp;
67122 unsigned long flags;
67123
67124 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67125 goto out;
67126 }
67127
67128 + if (gr_handle_ptrace(child, request)) {
67129 + ret = -EPERM;
67130 + goto out_put_task_struct;
67131 + }
67132 +
67133 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67134 ret = ptrace_attach(child, request, data);
67135 /*
67136 * Some architectures need to do book-keeping after
67137 * a ptrace attach.
67138 */
67139 - if (!ret)
67140 + if (!ret) {
67141 arch_ptrace_attach(child);
67142 + gr_audit_ptrace(child);
67143 + }
67144 goto out_put_task_struct;
67145 }
67146
67147 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67148 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67149 if (copied != sizeof(tmp))
67150 return -EIO;
67151 - return put_user(tmp, (unsigned long __user *)data);
67152 + return put_user(tmp, (__force unsigned long __user *)data);
67153 }
67154
67155 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67156 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67157 goto out;
67158 }
67159
67160 + if (gr_handle_ptrace(child, request)) {
67161 + ret = -EPERM;
67162 + goto out_put_task_struct;
67163 + }
67164 +
67165 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67166 ret = ptrace_attach(child, request, data);
67167 /*
67168 * Some architectures need to do book-keeping after
67169 * a ptrace attach.
67170 */
67171 - if (!ret)
67172 + if (!ret) {
67173 arch_ptrace_attach(child);
67174 + gr_audit_ptrace(child);
67175 + }
67176 goto out_put_task_struct;
67177 }
67178
67179 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67180 index 764825c..3aa6ac4 100644
67181 --- a/kernel/rcutorture.c
67182 +++ b/kernel/rcutorture.c
67183 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67184 { 0 };
67185 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67186 { 0 };
67187 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67188 -static atomic_t n_rcu_torture_alloc;
67189 -static atomic_t n_rcu_torture_alloc_fail;
67190 -static atomic_t n_rcu_torture_free;
67191 -static atomic_t n_rcu_torture_mberror;
67192 -static atomic_t n_rcu_torture_error;
67193 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67194 +static atomic_unchecked_t n_rcu_torture_alloc;
67195 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67196 +static atomic_unchecked_t n_rcu_torture_free;
67197 +static atomic_unchecked_t n_rcu_torture_mberror;
67198 +static atomic_unchecked_t n_rcu_torture_error;
67199 static long n_rcu_torture_boost_ktrerror;
67200 static long n_rcu_torture_boost_rterror;
67201 static long n_rcu_torture_boost_failure;
67202 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
67203
67204 spin_lock_bh(&rcu_torture_lock);
67205 if (list_empty(&rcu_torture_freelist)) {
67206 - atomic_inc(&n_rcu_torture_alloc_fail);
67207 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67208 spin_unlock_bh(&rcu_torture_lock);
67209 return NULL;
67210 }
67211 - atomic_inc(&n_rcu_torture_alloc);
67212 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67213 p = rcu_torture_freelist.next;
67214 list_del_init(p);
67215 spin_unlock_bh(&rcu_torture_lock);
67216 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
67217 static void
67218 rcu_torture_free(struct rcu_torture *p)
67219 {
67220 - atomic_inc(&n_rcu_torture_free);
67221 + atomic_inc_unchecked(&n_rcu_torture_free);
67222 spin_lock_bh(&rcu_torture_lock);
67223 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67224 spin_unlock_bh(&rcu_torture_lock);
67225 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
67226 i = rp->rtort_pipe_count;
67227 if (i > RCU_TORTURE_PIPE_LEN)
67228 i = RCU_TORTURE_PIPE_LEN;
67229 - atomic_inc(&rcu_torture_wcount[i]);
67230 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67231 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67232 rp->rtort_mbtest = 0;
67233 rcu_torture_free(rp);
67234 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67235 i = rp->rtort_pipe_count;
67236 if (i > RCU_TORTURE_PIPE_LEN)
67237 i = RCU_TORTURE_PIPE_LEN;
67238 - atomic_inc(&rcu_torture_wcount[i]);
67239 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67240 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67241 rp->rtort_mbtest = 0;
67242 list_del(&rp->rtort_free);
67243 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
67244 i = old_rp->rtort_pipe_count;
67245 if (i > RCU_TORTURE_PIPE_LEN)
67246 i = RCU_TORTURE_PIPE_LEN;
67247 - atomic_inc(&rcu_torture_wcount[i]);
67248 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67249 old_rp->rtort_pipe_count++;
67250 cur_ops->deferred_free(old_rp);
67251 }
67252 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
67253 return;
67254 }
67255 if (p->rtort_mbtest == 0)
67256 - atomic_inc(&n_rcu_torture_mberror);
67257 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67258 spin_lock(&rand_lock);
67259 cur_ops->read_delay(&rand);
67260 n_rcu_torture_timers++;
67261 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
67262 continue;
67263 }
67264 if (p->rtort_mbtest == 0)
67265 - atomic_inc(&n_rcu_torture_mberror);
67266 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67267 cur_ops->read_delay(&rand);
67268 preempt_disable();
67269 pipe_count = p->rtort_pipe_count;
67270 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
67271 rcu_torture_current,
67272 rcu_torture_current_version,
67273 list_empty(&rcu_torture_freelist),
67274 - atomic_read(&n_rcu_torture_alloc),
67275 - atomic_read(&n_rcu_torture_alloc_fail),
67276 - atomic_read(&n_rcu_torture_free),
67277 - atomic_read(&n_rcu_torture_mberror),
67278 + atomic_read_unchecked(&n_rcu_torture_alloc),
67279 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67280 + atomic_read_unchecked(&n_rcu_torture_free),
67281 + atomic_read_unchecked(&n_rcu_torture_mberror),
67282 n_rcu_torture_boost_ktrerror,
67283 n_rcu_torture_boost_rterror,
67284 n_rcu_torture_boost_failure,
67285 n_rcu_torture_boosts,
67286 n_rcu_torture_timers);
67287 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67288 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67289 n_rcu_torture_boost_ktrerror != 0 ||
67290 n_rcu_torture_boost_rterror != 0 ||
67291 n_rcu_torture_boost_failure != 0)
67292 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
67293 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67294 if (i > 1) {
67295 cnt += sprintf(&page[cnt], "!!! ");
67296 - atomic_inc(&n_rcu_torture_error);
67297 + atomic_inc_unchecked(&n_rcu_torture_error);
67298 WARN_ON_ONCE(1);
67299 }
67300 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67301 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
67302 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67303 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67304 cnt += sprintf(&page[cnt], " %d",
67305 - atomic_read(&rcu_torture_wcount[i]));
67306 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67307 }
67308 cnt += sprintf(&page[cnt], "\n");
67309 if (cur_ops->stats)
67310 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
67311
67312 if (cur_ops->cleanup)
67313 cur_ops->cleanup();
67314 - if (atomic_read(&n_rcu_torture_error))
67315 + if (atomic_read_unchecked(&n_rcu_torture_error))
67316 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67317 else
67318 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
67319 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
67320
67321 rcu_torture_current = NULL;
67322 rcu_torture_current_version = 0;
67323 - atomic_set(&n_rcu_torture_alloc, 0);
67324 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67325 - atomic_set(&n_rcu_torture_free, 0);
67326 - atomic_set(&n_rcu_torture_mberror, 0);
67327 - atomic_set(&n_rcu_torture_error, 0);
67328 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67329 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67330 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67331 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67332 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67333 n_rcu_torture_boost_ktrerror = 0;
67334 n_rcu_torture_boost_rterror = 0;
67335 n_rcu_torture_boost_failure = 0;
67336 n_rcu_torture_boosts = 0;
67337 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67338 - atomic_set(&rcu_torture_wcount[i], 0);
67339 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67340 for_each_possible_cpu(cpu) {
67341 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67342 per_cpu(rcu_torture_count, cpu)[i] = 0;
67343 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67344 index 6b76d81..7afc1b3 100644
67345 --- a/kernel/rcutree.c
67346 +++ b/kernel/rcutree.c
67347 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
67348 trace_rcu_dyntick("Start");
67349 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67350 smp_mb__before_atomic_inc(); /* See above. */
67351 - atomic_inc(&rdtp->dynticks);
67352 + atomic_inc_unchecked(&rdtp->dynticks);
67353 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67354 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67355 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67356 local_irq_restore(flags);
67357 }
67358
67359 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
67360 return;
67361 }
67362 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67363 - atomic_inc(&rdtp->dynticks);
67364 + atomic_inc_unchecked(&rdtp->dynticks);
67365 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67366 smp_mb__after_atomic_inc(); /* See above. */
67367 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67368 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67369 trace_rcu_dyntick("End");
67370 local_irq_restore(flags);
67371 }
67372 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
67373 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67374
67375 if (rdtp->dynticks_nmi_nesting == 0 &&
67376 - (atomic_read(&rdtp->dynticks) & 0x1))
67377 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67378 return;
67379 rdtp->dynticks_nmi_nesting++;
67380 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67381 - atomic_inc(&rdtp->dynticks);
67382 + atomic_inc_unchecked(&rdtp->dynticks);
67383 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67384 smp_mb__after_atomic_inc(); /* See above. */
67385 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67386 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67387 }
67388
67389 /**
67390 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
67391 return;
67392 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67393 smp_mb__before_atomic_inc(); /* See above. */
67394 - atomic_inc(&rdtp->dynticks);
67395 + atomic_inc_unchecked(&rdtp->dynticks);
67396 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67397 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67398 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67399 }
67400
67401 /**
67402 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
67403 */
67404 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67405 {
67406 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67407 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67408 return 0;
67409 }
67410
67411 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67412 unsigned int curr;
67413 unsigned int snap;
67414
67415 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67416 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67417 snap = (unsigned int)rdp->dynticks_snap;
67418
67419 /*
67420 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67421 /*
67422 * Do RCU core processing for the current CPU.
67423 */
67424 -static void rcu_process_callbacks(struct softirq_action *unused)
67425 +static void rcu_process_callbacks(void)
67426 {
67427 trace_rcu_utilization("Start RCU core");
67428 __rcu_process_callbacks(&rcu_sched_state,
67429 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67430 index 849ce9e..74bc9de 100644
67431 --- a/kernel/rcutree.h
67432 +++ b/kernel/rcutree.h
67433 @@ -86,7 +86,7 @@
67434 struct rcu_dynticks {
67435 int dynticks_nesting; /* Track irq/process nesting level. */
67436 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67437 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
67438 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
67439 };
67440
67441 /* RCU's kthread states for tracing. */
67442 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67443 index 4b9b9f8..2326053 100644
67444 --- a/kernel/rcutree_plugin.h
67445 +++ b/kernel/rcutree_plugin.h
67446 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
67447
67448 /* Clean up and exit. */
67449 smp_mb(); /* ensure expedited GP seen before counter increment. */
67450 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67451 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67452 unlock_mb_ret:
67453 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67454 mb_ret:
67455 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
67456
67457 #else /* #ifndef CONFIG_SMP */
67458
67459 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67460 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67461 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67462 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67463
67464 static int synchronize_sched_expedited_cpu_stop(void *data)
67465 {
67466 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
67467 int firstsnap, s, snap, trycount = 0;
67468
67469 /* Note that atomic_inc_return() implies full memory barrier. */
67470 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67471 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67472 get_online_cpus();
67473
67474 /*
67475 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
67476 }
67477
67478 /* Check to see if someone else did our work for us. */
67479 - s = atomic_read(&sync_sched_expedited_done);
67480 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67481 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67482 smp_mb(); /* ensure test happens before caller kfree */
67483 return;
67484 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
67485 * grace period works for us.
67486 */
67487 get_online_cpus();
67488 - snap = atomic_read(&sync_sched_expedited_started) - 1;
67489 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
67490 smp_mb(); /* ensure read is before try_stop_cpus(). */
67491 }
67492
67493 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
67494 * than we did beat us to the punch.
67495 */
67496 do {
67497 - s = atomic_read(&sync_sched_expedited_done);
67498 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67499 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67500 smp_mb(); /* ensure test happens before caller kfree */
67501 break;
67502 }
67503 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67504 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67505
67506 put_online_cpus();
67507 }
67508 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
67509 for_each_online_cpu(thatcpu) {
67510 if (thatcpu == cpu)
67511 continue;
67512 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
67513 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
67514 thatcpu).dynticks);
67515 smp_mb(); /* Order sampling of snap with end of grace period. */
67516 if ((snap & 0x1) != 0) {
67517 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67518 index 9feffa4..54058df 100644
67519 --- a/kernel/rcutree_trace.c
67520 +++ b/kernel/rcutree_trace.c
67521 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67522 rdp->qs_pending);
67523 #ifdef CONFIG_NO_HZ
67524 seq_printf(m, " dt=%d/%d/%d df=%lu",
67525 - atomic_read(&rdp->dynticks->dynticks),
67526 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67527 rdp->dynticks->dynticks_nesting,
67528 rdp->dynticks->dynticks_nmi_nesting,
67529 rdp->dynticks_fqs);
67530 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67531 rdp->qs_pending);
67532 #ifdef CONFIG_NO_HZ
67533 seq_printf(m, ",%d,%d,%d,%lu",
67534 - atomic_read(&rdp->dynticks->dynticks),
67535 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67536 rdp->dynticks->dynticks_nesting,
67537 rdp->dynticks->dynticks_nmi_nesting,
67538 rdp->dynticks_fqs);
67539 diff --git a/kernel/resource.c b/kernel/resource.c
67540 index 7640b3a..5879283 100644
67541 --- a/kernel/resource.c
67542 +++ b/kernel/resource.c
67543 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67544
67545 static int __init ioresources_init(void)
67546 {
67547 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67548 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67549 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67550 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67551 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67552 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67553 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67554 +#endif
67555 +#else
67556 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67557 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67558 +#endif
67559 return 0;
67560 }
67561 __initcall(ioresources_init);
67562 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67563 index 3d9f31c..7fefc9e 100644
67564 --- a/kernel/rtmutex-tester.c
67565 +++ b/kernel/rtmutex-tester.c
67566 @@ -20,7 +20,7 @@
67567 #define MAX_RT_TEST_MUTEXES 8
67568
67569 static spinlock_t rttest_lock;
67570 -static atomic_t rttest_event;
67571 +static atomic_unchecked_t rttest_event;
67572
67573 struct test_thread_data {
67574 int opcode;
67575 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67576
67577 case RTTEST_LOCKCONT:
67578 td->mutexes[td->opdata] = 1;
67579 - td->event = atomic_add_return(1, &rttest_event);
67580 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67581 return 0;
67582
67583 case RTTEST_RESET:
67584 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67585 return 0;
67586
67587 case RTTEST_RESETEVENT:
67588 - atomic_set(&rttest_event, 0);
67589 + atomic_set_unchecked(&rttest_event, 0);
67590 return 0;
67591
67592 default:
67593 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67594 return ret;
67595
67596 td->mutexes[id] = 1;
67597 - td->event = atomic_add_return(1, &rttest_event);
67598 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67599 rt_mutex_lock(&mutexes[id]);
67600 - td->event = atomic_add_return(1, &rttest_event);
67601 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67602 td->mutexes[id] = 4;
67603 return 0;
67604
67605 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67606 return ret;
67607
67608 td->mutexes[id] = 1;
67609 - td->event = atomic_add_return(1, &rttest_event);
67610 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67611 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67612 - td->event = atomic_add_return(1, &rttest_event);
67613 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67614 td->mutexes[id] = ret ? 0 : 4;
67615 return ret ? -EINTR : 0;
67616
67617 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67618 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67619 return ret;
67620
67621 - td->event = atomic_add_return(1, &rttest_event);
67622 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67623 rt_mutex_unlock(&mutexes[id]);
67624 - td->event = atomic_add_return(1, &rttest_event);
67625 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67626 td->mutexes[id] = 0;
67627 return 0;
67628
67629 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67630 break;
67631
67632 td->mutexes[dat] = 2;
67633 - td->event = atomic_add_return(1, &rttest_event);
67634 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67635 break;
67636
67637 default:
67638 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67639 return;
67640
67641 td->mutexes[dat] = 3;
67642 - td->event = atomic_add_return(1, &rttest_event);
67643 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67644 break;
67645
67646 case RTTEST_LOCKNOWAIT:
67647 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67648 return;
67649
67650 td->mutexes[dat] = 1;
67651 - td->event = atomic_add_return(1, &rttest_event);
67652 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67653 return;
67654
67655 default:
67656 diff --git a/kernel/sched.c b/kernel/sched.c
67657 index d6b149c..896cbb8 100644
67658 --- a/kernel/sched.c
67659 +++ b/kernel/sched.c
67660 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
67661 BUG(); /* the idle class will always have a runnable task */
67662 }
67663
67664 +#ifdef CONFIG_GRKERNSEC_SETXID
67665 +extern void gr_delayed_cred_worker(void);
67666 +static inline void gr_cred_schedule(void)
67667 +{
67668 + if (unlikely(current->delayed_cred))
67669 + gr_delayed_cred_worker();
67670 +}
67671 +#else
67672 +static inline void gr_cred_schedule(void)
67673 +{
67674 +}
67675 +#endif
67676 +
67677 /*
67678 * __schedule() is the main scheduler function.
67679 */
67680 @@ -4408,6 +4421,8 @@ need_resched:
67681
67682 schedule_debug(prev);
67683
67684 + gr_cred_schedule();
67685 +
67686 if (sched_feat(HRTICK))
67687 hrtick_clear(rq);
67688
67689 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
67690 /* convert nice value [19,-20] to rlimit style value [1,40] */
67691 int nice_rlim = 20 - nice;
67692
67693 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67694 +
67695 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67696 capable(CAP_SYS_NICE));
67697 }
67698 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67699 if (nice > 19)
67700 nice = 19;
67701
67702 - if (increment < 0 && !can_nice(current, nice))
67703 + if (increment < 0 && (!can_nice(current, nice) ||
67704 + gr_handle_chroot_nice()))
67705 return -EPERM;
67706
67707 retval = security_task_setnice(current, nice);
67708 @@ -5288,6 +5306,7 @@ recheck:
67709 unsigned long rlim_rtprio =
67710 task_rlimit(p, RLIMIT_RTPRIO);
67711
67712 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67713 /* can't set/change the rt policy */
67714 if (policy != p->policy && !rlim_rtprio)
67715 return -EPERM;
67716 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
67717 index 429242f..d7cca82 100644
67718 --- a/kernel/sched_autogroup.c
67719 +++ b/kernel/sched_autogroup.c
67720 @@ -7,7 +7,7 @@
67721
67722 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67723 static struct autogroup autogroup_default;
67724 -static atomic_t autogroup_seq_nr;
67725 +static atomic_unchecked_t autogroup_seq_nr;
67726
67727 static void __init autogroup_init(struct task_struct *init_task)
67728 {
67729 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67730
67731 kref_init(&ag->kref);
67732 init_rwsem(&ag->lock);
67733 - ag->id = atomic_inc_return(&autogroup_seq_nr);
67734 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67735 ag->tg = tg;
67736 #ifdef CONFIG_RT_GROUP_SCHED
67737 /*
67738 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
67739 index 8a39fa3..34f3dbc 100644
67740 --- a/kernel/sched_fair.c
67741 +++ b/kernel/sched_fair.c
67742 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67743 * run_rebalance_domains is triggered when needed from the scheduler tick.
67744 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67745 */
67746 -static void run_rebalance_domains(struct softirq_action *h)
67747 +static void run_rebalance_domains(void)
67748 {
67749 int this_cpu = smp_processor_id();
67750 struct rq *this_rq = cpu_rq(this_cpu);
67751 diff --git a/kernel/signal.c b/kernel/signal.c
67752 index 2065515..aed2987 100644
67753 --- a/kernel/signal.c
67754 +++ b/kernel/signal.c
67755 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
67756
67757 int print_fatal_signals __read_mostly;
67758
67759 -static void __user *sig_handler(struct task_struct *t, int sig)
67760 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
67761 {
67762 return t->sighand->action[sig - 1].sa.sa_handler;
67763 }
67764
67765 -static int sig_handler_ignored(void __user *handler, int sig)
67766 +static int sig_handler_ignored(__sighandler_t handler, int sig)
67767 {
67768 /* Is it explicitly or implicitly ignored? */
67769 return handler == SIG_IGN ||
67770 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67771 static int sig_task_ignored(struct task_struct *t, int sig,
67772 int from_ancestor_ns)
67773 {
67774 - void __user *handler;
67775 + __sighandler_t handler;
67776
67777 handler = sig_handler(t, sig);
67778
67779 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67780 atomic_inc(&user->sigpending);
67781 rcu_read_unlock();
67782
67783 + if (!override_rlimit)
67784 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67785 +
67786 if (override_rlimit ||
67787 atomic_read(&user->sigpending) <=
67788 task_rlimit(t, RLIMIT_SIGPENDING)) {
67789 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67790
67791 int unhandled_signal(struct task_struct *tsk, int sig)
67792 {
67793 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
67794 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
67795 if (is_global_init(tsk))
67796 return 1;
67797 if (handler != SIG_IGN && handler != SIG_DFL)
67798 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
67799 }
67800 }
67801
67802 + /* allow glibc communication via tgkill to other threads in our
67803 + thread group */
67804 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
67805 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
67806 + && gr_handle_signal(t, sig))
67807 + return -EPERM;
67808 +
67809 return security_task_kill(t, info, sig, 0);
67810 }
67811
67812 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67813 return send_signal(sig, info, p, 1);
67814 }
67815
67816 -static int
67817 +int
67818 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67819 {
67820 return send_signal(sig, info, t, 0);
67821 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67822 unsigned long int flags;
67823 int ret, blocked, ignored;
67824 struct k_sigaction *action;
67825 + int is_unhandled = 0;
67826
67827 spin_lock_irqsave(&t->sighand->siglock, flags);
67828 action = &t->sighand->action[sig-1];
67829 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67830 }
67831 if (action->sa.sa_handler == SIG_DFL)
67832 t->signal->flags &= ~SIGNAL_UNKILLABLE;
67833 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
67834 + is_unhandled = 1;
67835 ret = specific_send_sig_info(sig, info, t);
67836 spin_unlock_irqrestore(&t->sighand->siglock, flags);
67837
67838 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
67839 + normal operation */
67840 + if (is_unhandled) {
67841 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
67842 + gr_handle_crash(t, sig);
67843 + }
67844 +
67845 return ret;
67846 }
67847
67848 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67849 ret = check_kill_permission(sig, info, p);
67850 rcu_read_unlock();
67851
67852 - if (!ret && sig)
67853 + if (!ret && sig) {
67854 ret = do_send_sig_info(sig, info, p, true);
67855 + if (!ret)
67856 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
67857 + }
67858
67859 return ret;
67860 }
67861 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
67862 int error = -ESRCH;
67863
67864 rcu_read_lock();
67865 - p = find_task_by_vpid(pid);
67866 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67867 + /* allow glibc communication via tgkill to other threads in our
67868 + thread group */
67869 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
67870 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
67871 + p = find_task_by_vpid_unrestricted(pid);
67872 + else
67873 +#endif
67874 + p = find_task_by_vpid(pid);
67875 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
67876 error = check_kill_permission(sig, info, p);
67877 /*
67878 diff --git a/kernel/smp.c b/kernel/smp.c
67879 index db197d6..17aef0b 100644
67880 --- a/kernel/smp.c
67881 +++ b/kernel/smp.c
67882 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
67883 }
67884 EXPORT_SYMBOL(smp_call_function);
67885
67886 -void ipi_call_lock(void)
67887 +void ipi_call_lock(void) __acquires(call_function.lock)
67888 {
67889 raw_spin_lock(&call_function.lock);
67890 }
67891
67892 -void ipi_call_unlock(void)
67893 +void ipi_call_unlock(void) __releases(call_function.lock)
67894 {
67895 raw_spin_unlock(&call_function.lock);
67896 }
67897
67898 -void ipi_call_lock_irq(void)
67899 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
67900 {
67901 raw_spin_lock_irq(&call_function.lock);
67902 }
67903
67904 -void ipi_call_unlock_irq(void)
67905 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
67906 {
67907 raw_spin_unlock_irq(&call_function.lock);
67908 }
67909 diff --git a/kernel/softirq.c b/kernel/softirq.c
67910 index 2c71d91..1021f81 100644
67911 --- a/kernel/softirq.c
67912 +++ b/kernel/softirq.c
67913 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
67914
67915 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
67916
67917 -char *softirq_to_name[NR_SOFTIRQS] = {
67918 +const char * const softirq_to_name[NR_SOFTIRQS] = {
67919 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
67920 "TASKLET", "SCHED", "HRTIMER", "RCU"
67921 };
67922 @@ -235,7 +235,7 @@ restart:
67923 kstat_incr_softirqs_this_cpu(vec_nr);
67924
67925 trace_softirq_entry(vec_nr);
67926 - h->action(h);
67927 + h->action();
67928 trace_softirq_exit(vec_nr);
67929 if (unlikely(prev_count != preempt_count())) {
67930 printk(KERN_ERR "huh, entered softirq %u %s %p"
67931 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
67932 local_irq_restore(flags);
67933 }
67934
67935 -void open_softirq(int nr, void (*action)(struct softirq_action *))
67936 +void open_softirq(int nr, void (*action)(void))
67937 {
67938 - softirq_vec[nr].action = action;
67939 + pax_open_kernel();
67940 + *(void **)&softirq_vec[nr].action = action;
67941 + pax_close_kernel();
67942 }
67943
67944 /*
67945 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
67946
67947 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
67948
67949 -static void tasklet_action(struct softirq_action *a)
67950 +static void tasklet_action(void)
67951 {
67952 struct tasklet_struct *list;
67953
67954 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
67955 }
67956 }
67957
67958 -static void tasklet_hi_action(struct softirq_action *a)
67959 +static void tasklet_hi_action(void)
67960 {
67961 struct tasklet_struct *list;
67962
67963 diff --git a/kernel/sys.c b/kernel/sys.c
67964 index 481611f..0754d86 100644
67965 --- a/kernel/sys.c
67966 +++ b/kernel/sys.c
67967 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
67968 error = -EACCES;
67969 goto out;
67970 }
67971 +
67972 + if (gr_handle_chroot_setpriority(p, niceval)) {
67973 + error = -EACCES;
67974 + goto out;
67975 + }
67976 +
67977 no_nice = security_task_setnice(p, niceval);
67978 if (no_nice) {
67979 error = no_nice;
67980 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
67981 goto error;
67982 }
67983
67984 + if (gr_check_group_change(new->gid, new->egid, -1))
67985 + goto error;
67986 +
67987 if (rgid != (gid_t) -1 ||
67988 (egid != (gid_t) -1 && egid != old->gid))
67989 new->sgid = new->egid;
67990 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
67991 old = current_cred();
67992
67993 retval = -EPERM;
67994 +
67995 + if (gr_check_group_change(gid, gid, gid))
67996 + goto error;
67997 +
67998 if (nsown_capable(CAP_SETGID))
67999 new->gid = new->egid = new->sgid = new->fsgid = gid;
68000 else if (gid == old->gid || gid == old->sgid)
68001 @@ -618,7 +631,7 @@ error:
68002 /*
68003 * change the user struct in a credentials set to match the new UID
68004 */
68005 -static int set_user(struct cred *new)
68006 +int set_user(struct cred *new)
68007 {
68008 struct user_struct *new_user;
68009
68010 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68011 goto error;
68012 }
68013
68014 + if (gr_check_user_change(new->uid, new->euid, -1))
68015 + goto error;
68016 +
68017 if (new->uid != old->uid) {
68018 retval = set_user(new);
68019 if (retval < 0)
68020 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68021 old = current_cred();
68022
68023 retval = -EPERM;
68024 +
68025 + if (gr_check_crash_uid(uid))
68026 + goto error;
68027 + if (gr_check_user_change(uid, uid, uid))
68028 + goto error;
68029 +
68030 if (nsown_capable(CAP_SETUID)) {
68031 new->suid = new->uid = uid;
68032 if (uid != old->uid) {
68033 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68034 goto error;
68035 }
68036
68037 + if (gr_check_user_change(ruid, euid, -1))
68038 + goto error;
68039 +
68040 if (ruid != (uid_t) -1) {
68041 new->uid = ruid;
68042 if (ruid != old->uid) {
68043 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68044 goto error;
68045 }
68046
68047 + if (gr_check_group_change(rgid, egid, -1))
68048 + goto error;
68049 +
68050 if (rgid != (gid_t) -1)
68051 new->gid = rgid;
68052 if (egid != (gid_t) -1)
68053 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68054 old = current_cred();
68055 old_fsuid = old->fsuid;
68056
68057 + if (gr_check_user_change(-1, -1, uid))
68058 + goto error;
68059 +
68060 if (uid == old->uid || uid == old->euid ||
68061 uid == old->suid || uid == old->fsuid ||
68062 nsown_capable(CAP_SETUID)) {
68063 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68064 }
68065 }
68066
68067 +error:
68068 abort_creds(new);
68069 return old_fsuid;
68070
68071 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68072 if (gid == old->gid || gid == old->egid ||
68073 gid == old->sgid || gid == old->fsgid ||
68074 nsown_capable(CAP_SETGID)) {
68075 + if (gr_check_group_change(-1, -1, gid))
68076 + goto error;
68077 +
68078 if (gid != old_fsgid) {
68079 new->fsgid = gid;
68080 goto change_okay;
68081 }
68082 }
68083
68084 +error:
68085 abort_creds(new);
68086 return old_fsgid;
68087
68088 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
68089 }
68090 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68091 snprintf(buf, len, "2.6.%u%s", v, rest);
68092 - ret = copy_to_user(release, buf, len);
68093 + if (len > sizeof(buf))
68094 + ret = -EFAULT;
68095 + else
68096 + ret = copy_to_user(release, buf, len);
68097 }
68098 return ret;
68099 }
68100 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68101 return -EFAULT;
68102
68103 down_read(&uts_sem);
68104 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68105 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68106 __OLD_UTS_LEN);
68107 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68108 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68109 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68110 __OLD_UTS_LEN);
68111 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68112 - error |= __copy_to_user(&name->release, &utsname()->release,
68113 + error |= __copy_to_user(name->release, &utsname()->release,
68114 __OLD_UTS_LEN);
68115 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68116 - error |= __copy_to_user(&name->version, &utsname()->version,
68117 + error |= __copy_to_user(name->version, &utsname()->version,
68118 __OLD_UTS_LEN);
68119 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68120 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68121 + error |= __copy_to_user(name->machine, &utsname()->machine,
68122 __OLD_UTS_LEN);
68123 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68124 up_read(&uts_sem);
68125 @@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68126 error = get_dumpable(me->mm);
68127 break;
68128 case PR_SET_DUMPABLE:
68129 - if (arg2 < 0 || arg2 > 1) {
68130 + if (arg2 > 1) {
68131 error = -EINVAL;
68132 break;
68133 }
68134 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68135 index ae27196..7506d69 100644
68136 --- a/kernel/sysctl.c
68137 +++ b/kernel/sysctl.c
68138 @@ -86,6 +86,13 @@
68139
68140
68141 #if defined(CONFIG_SYSCTL)
68142 +#include <linux/grsecurity.h>
68143 +#include <linux/grinternal.h>
68144 +
68145 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
68146 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
68147 + const int op);
68148 +extern int gr_handle_chroot_sysctl(const int op);
68149
68150 /* External variables not in a header file. */
68151 extern int sysctl_overcommit_memory;
68152 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68153 }
68154
68155 #endif
68156 +extern struct ctl_table grsecurity_table[];
68157
68158 static struct ctl_table root_table[];
68159 static struct ctl_table_root sysctl_table_root;
68160 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
68161 int sysctl_legacy_va_layout;
68162 #endif
68163
68164 +#ifdef CONFIG_PAX_SOFTMODE
68165 +static ctl_table pax_table[] = {
68166 + {
68167 + .procname = "softmode",
68168 + .data = &pax_softmode,
68169 + .maxlen = sizeof(unsigned int),
68170 + .mode = 0600,
68171 + .proc_handler = &proc_dointvec,
68172 + },
68173 +
68174 + { }
68175 +};
68176 +#endif
68177 +
68178 /* The default sysctl tables: */
68179
68180 static struct ctl_table root_table[] = {
68181 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
68182 #endif
68183
68184 static struct ctl_table kern_table[] = {
68185 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68186 + {
68187 + .procname = "grsecurity",
68188 + .mode = 0500,
68189 + .child = grsecurity_table,
68190 + },
68191 +#endif
68192 +
68193 +#ifdef CONFIG_PAX_SOFTMODE
68194 + {
68195 + .procname = "pax",
68196 + .mode = 0500,
68197 + .child = pax_table,
68198 + },
68199 +#endif
68200 +
68201 {
68202 .procname = "sched_child_runs_first",
68203 .data = &sysctl_sched_child_runs_first,
68204 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
68205 .data = &modprobe_path,
68206 .maxlen = KMOD_PATH_LEN,
68207 .mode = 0644,
68208 - .proc_handler = proc_dostring,
68209 + .proc_handler = proc_dostring_modpriv,
68210 },
68211 {
68212 .procname = "modules_disabled",
68213 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
68214 .extra1 = &zero,
68215 .extra2 = &one,
68216 },
68217 +#endif
68218 {
68219 .procname = "kptr_restrict",
68220 .data = &kptr_restrict,
68221 .maxlen = sizeof(int),
68222 .mode = 0644,
68223 .proc_handler = proc_dmesg_restrict,
68224 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68225 + .extra1 = &two,
68226 +#else
68227 .extra1 = &zero,
68228 +#endif
68229 .extra2 = &two,
68230 },
68231 -#endif
68232 {
68233 .procname = "ngroups_max",
68234 .data = &ngroups_max,
68235 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
68236 .proc_handler = proc_dointvec_minmax,
68237 .extra1 = &zero,
68238 },
68239 + {
68240 + .procname = "heap_stack_gap",
68241 + .data = &sysctl_heap_stack_gap,
68242 + .maxlen = sizeof(sysctl_heap_stack_gap),
68243 + .mode = 0644,
68244 + .proc_handler = proc_doulongvec_minmax,
68245 + },
68246 #else
68247 {
68248 .procname = "nr_trim_pages",
68249 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
68250 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
68251 {
68252 int mode;
68253 + int error;
68254 +
68255 + if (table->parent != NULL && table->parent->procname != NULL &&
68256 + table->procname != NULL &&
68257 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
68258 + return -EACCES;
68259 + if (gr_handle_chroot_sysctl(op))
68260 + return -EACCES;
68261 + error = gr_handle_sysctl(table, op);
68262 + if (error)
68263 + return error;
68264
68265 if (root->permissions)
68266 mode = root->permissions(root, current->nsproxy, table);
68267 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
68268 buffer, lenp, ppos);
68269 }
68270
68271 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68272 + void __user *buffer, size_t *lenp, loff_t *ppos)
68273 +{
68274 + if (write && !capable(CAP_SYS_MODULE))
68275 + return -EPERM;
68276 +
68277 + return _proc_do_string(table->data, table->maxlen, write,
68278 + buffer, lenp, ppos);
68279 +}
68280 +
68281 static size_t proc_skip_spaces(char **buf)
68282 {
68283 size_t ret;
68284 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68285 len = strlen(tmp);
68286 if (len > *size)
68287 len = *size;
68288 + if (len > sizeof(tmp))
68289 + len = sizeof(tmp);
68290 if (copy_to_user(*buf, tmp, len))
68291 return -EFAULT;
68292 *size -= len;
68293 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68294 *i = val;
68295 } else {
68296 val = convdiv * (*i) / convmul;
68297 - if (!first)
68298 + if (!first) {
68299 err = proc_put_char(&buffer, &left, '\t');
68300 + if (err)
68301 + break;
68302 + }
68303 err = proc_put_long(&buffer, &left, val, false);
68304 if (err)
68305 break;
68306 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
68307 return -ENOSYS;
68308 }
68309
68310 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68311 + void __user *buffer, size_t *lenp, loff_t *ppos)
68312 +{
68313 + return -ENOSYS;
68314 +}
68315 +
68316 int proc_dointvec(struct ctl_table *table, int write,
68317 void __user *buffer, size_t *lenp, loff_t *ppos)
68318 {
68319 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68320 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68321 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68322 EXPORT_SYMBOL(proc_dostring);
68323 +EXPORT_SYMBOL(proc_dostring_modpriv);
68324 EXPORT_SYMBOL(proc_doulongvec_minmax);
68325 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68326 EXPORT_SYMBOL(register_sysctl_table);
68327 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68328 index a650694..aaeeb20 100644
68329 --- a/kernel/sysctl_binary.c
68330 +++ b/kernel/sysctl_binary.c
68331 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68332 int i;
68333
68334 set_fs(KERNEL_DS);
68335 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68336 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68337 set_fs(old_fs);
68338 if (result < 0)
68339 goto out_kfree;
68340 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68341 }
68342
68343 set_fs(KERNEL_DS);
68344 - result = vfs_write(file, buffer, str - buffer, &pos);
68345 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68346 set_fs(old_fs);
68347 if (result < 0)
68348 goto out_kfree;
68349 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68350 int i;
68351
68352 set_fs(KERNEL_DS);
68353 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68354 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68355 set_fs(old_fs);
68356 if (result < 0)
68357 goto out_kfree;
68358 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68359 }
68360
68361 set_fs(KERNEL_DS);
68362 - result = vfs_write(file, buffer, str - buffer, &pos);
68363 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68364 set_fs(old_fs);
68365 if (result < 0)
68366 goto out_kfree;
68367 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68368 int i;
68369
68370 set_fs(KERNEL_DS);
68371 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68372 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68373 set_fs(old_fs);
68374 if (result < 0)
68375 goto out;
68376 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68377 __le16 dnaddr;
68378
68379 set_fs(KERNEL_DS);
68380 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68381 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68382 set_fs(old_fs);
68383 if (result < 0)
68384 goto out;
68385 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68386 le16_to_cpu(dnaddr) & 0x3ff);
68387
68388 set_fs(KERNEL_DS);
68389 - result = vfs_write(file, buf, len, &pos);
68390 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68391 set_fs(old_fs);
68392 if (result < 0)
68393 goto out;
68394 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
68395 index 362da65..ab8ef8c 100644
68396 --- a/kernel/sysctl_check.c
68397 +++ b/kernel/sysctl_check.c
68398 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
68399 set_fail(&fail, table, "Directory with extra2");
68400 } else {
68401 if ((table->proc_handler == proc_dostring) ||
68402 + (table->proc_handler == proc_dostring_modpriv) ||
68403 (table->proc_handler == proc_dointvec) ||
68404 (table->proc_handler == proc_dointvec_minmax) ||
68405 (table->proc_handler == proc_dointvec_jiffies) ||
68406 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68407 index e660464..c8b9e67 100644
68408 --- a/kernel/taskstats.c
68409 +++ b/kernel/taskstats.c
68410 @@ -27,9 +27,12 @@
68411 #include <linux/cgroup.h>
68412 #include <linux/fs.h>
68413 #include <linux/file.h>
68414 +#include <linux/grsecurity.h>
68415 #include <net/genetlink.h>
68416 #include <linux/atomic.h>
68417
68418 +extern int gr_is_taskstats_denied(int pid);
68419 +
68420 /*
68421 * Maximum length of a cpumask that can be specified in
68422 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68423 @@ -556,6 +559,9 @@ err:
68424
68425 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68426 {
68427 + if (gr_is_taskstats_denied(current->pid))
68428 + return -EACCES;
68429 +
68430 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68431 return cmd_attr_register_cpumask(info);
68432 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68433 diff --git a/kernel/time.c b/kernel/time.c
68434 index 73e416d..cfc6f69 100644
68435 --- a/kernel/time.c
68436 +++ b/kernel/time.c
68437 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68438 return error;
68439
68440 if (tz) {
68441 + /* we log in do_settimeofday called below, so don't log twice
68442 + */
68443 + if (!tv)
68444 + gr_log_timechange();
68445 +
68446 /* SMP safe, global irq locking makes it work. */
68447 sys_tz = *tz;
68448 update_vsyscall_tz();
68449 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68450 index 8a46f5d..bbe6f9c 100644
68451 --- a/kernel/time/alarmtimer.c
68452 +++ b/kernel/time/alarmtimer.c
68453 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
68454 struct platform_device *pdev;
68455 int error = 0;
68456 int i;
68457 - struct k_clock alarm_clock = {
68458 + static struct k_clock alarm_clock = {
68459 .clock_getres = alarm_clock_getres,
68460 .clock_get = alarm_clock_get,
68461 .timer_create = alarm_timer_create,
68462 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68463 index fd4a7b1..fae5c2a 100644
68464 --- a/kernel/time/tick-broadcast.c
68465 +++ b/kernel/time/tick-broadcast.c
68466 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68467 * then clear the broadcast bit.
68468 */
68469 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68470 - int cpu = smp_processor_id();
68471 + cpu = smp_processor_id();
68472
68473 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68474 tick_broadcast_clear_oneshot(cpu);
68475 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68476 index 2378413..be455fd 100644
68477 --- a/kernel/time/timekeeping.c
68478 +++ b/kernel/time/timekeeping.c
68479 @@ -14,6 +14,7 @@
68480 #include <linux/init.h>
68481 #include <linux/mm.h>
68482 #include <linux/sched.h>
68483 +#include <linux/grsecurity.h>
68484 #include <linux/syscore_ops.h>
68485 #include <linux/clocksource.h>
68486 #include <linux/jiffies.h>
68487 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
68488 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68489 return -EINVAL;
68490
68491 + gr_log_timechange();
68492 +
68493 write_seqlock_irqsave(&xtime_lock, flags);
68494
68495 timekeeping_forward_now();
68496 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68497 index 3258455..f35227d 100644
68498 --- a/kernel/time/timer_list.c
68499 +++ b/kernel/time/timer_list.c
68500 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68501
68502 static void print_name_offset(struct seq_file *m, void *sym)
68503 {
68504 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68505 + SEQ_printf(m, "<%p>", NULL);
68506 +#else
68507 char symname[KSYM_NAME_LEN];
68508
68509 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68510 SEQ_printf(m, "<%pK>", sym);
68511 else
68512 SEQ_printf(m, "%s", symname);
68513 +#endif
68514 }
68515
68516 static void
68517 @@ -112,7 +116,11 @@ next_one:
68518 static void
68519 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68520 {
68521 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68522 + SEQ_printf(m, " .base: %p\n", NULL);
68523 +#else
68524 SEQ_printf(m, " .base: %pK\n", base);
68525 +#endif
68526 SEQ_printf(m, " .index: %d\n",
68527 base->index);
68528 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68529 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68530 {
68531 struct proc_dir_entry *pe;
68532
68533 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68534 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68535 +#else
68536 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68537 +#endif
68538 if (!pe)
68539 return -ENOMEM;
68540 return 0;
68541 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68542 index 0b537f2..9e71eca 100644
68543 --- a/kernel/time/timer_stats.c
68544 +++ b/kernel/time/timer_stats.c
68545 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68546 static unsigned long nr_entries;
68547 static struct entry entries[MAX_ENTRIES];
68548
68549 -static atomic_t overflow_count;
68550 +static atomic_unchecked_t overflow_count;
68551
68552 /*
68553 * The entries are in a hash-table, for fast lookup:
68554 @@ -140,7 +140,7 @@ static void reset_entries(void)
68555 nr_entries = 0;
68556 memset(entries, 0, sizeof(entries));
68557 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68558 - atomic_set(&overflow_count, 0);
68559 + atomic_set_unchecked(&overflow_count, 0);
68560 }
68561
68562 static struct entry *alloc_entry(void)
68563 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68564 if (likely(entry))
68565 entry->count++;
68566 else
68567 - atomic_inc(&overflow_count);
68568 + atomic_inc_unchecked(&overflow_count);
68569
68570 out_unlock:
68571 raw_spin_unlock_irqrestore(lock, flags);
68572 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68573
68574 static void print_name_offset(struct seq_file *m, unsigned long addr)
68575 {
68576 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68577 + seq_printf(m, "<%p>", NULL);
68578 +#else
68579 char symname[KSYM_NAME_LEN];
68580
68581 if (lookup_symbol_name(addr, symname) < 0)
68582 seq_printf(m, "<%p>", (void *)addr);
68583 else
68584 seq_printf(m, "%s", symname);
68585 +#endif
68586 }
68587
68588 static int tstats_show(struct seq_file *m, void *v)
68589 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68590
68591 seq_puts(m, "Timer Stats Version: v0.2\n");
68592 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68593 - if (atomic_read(&overflow_count))
68594 + if (atomic_read_unchecked(&overflow_count))
68595 seq_printf(m, "Overflow: %d entries\n",
68596 - atomic_read(&overflow_count));
68597 + atomic_read_unchecked(&overflow_count));
68598
68599 for (i = 0; i < nr_entries; i++) {
68600 entry = entries + i;
68601 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68602 {
68603 struct proc_dir_entry *pe;
68604
68605 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68606 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68607 +#else
68608 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68609 +#endif
68610 if (!pe)
68611 return -ENOMEM;
68612 return 0;
68613 diff --git a/kernel/timer.c b/kernel/timer.c
68614 index 9c3c62b..441690e 100644
68615 --- a/kernel/timer.c
68616 +++ b/kernel/timer.c
68617 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
68618 /*
68619 * This function runs timers and the timer-tq in bottom half context.
68620 */
68621 -static void run_timer_softirq(struct softirq_action *h)
68622 +static void run_timer_softirq(void)
68623 {
68624 struct tvec_base *base = __this_cpu_read(tvec_bases);
68625
68626 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68627 index 16fc34a..efd8bb8 100644
68628 --- a/kernel/trace/blktrace.c
68629 +++ b/kernel/trace/blktrace.c
68630 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68631 struct blk_trace *bt = filp->private_data;
68632 char buf[16];
68633
68634 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68635 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68636
68637 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68638 }
68639 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68640 return 1;
68641
68642 bt = buf->chan->private_data;
68643 - atomic_inc(&bt->dropped);
68644 + atomic_inc_unchecked(&bt->dropped);
68645 return 0;
68646 }
68647
68648 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68649
68650 bt->dir = dir;
68651 bt->dev = dev;
68652 - atomic_set(&bt->dropped, 0);
68653 + atomic_set_unchecked(&bt->dropped, 0);
68654
68655 ret = -EIO;
68656 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68657 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68658 index 25b4f4d..6f4772d 100644
68659 --- a/kernel/trace/ftrace.c
68660 +++ b/kernel/trace/ftrace.c
68661 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68662 if (unlikely(ftrace_disabled))
68663 return 0;
68664
68665 + ret = ftrace_arch_code_modify_prepare();
68666 + FTRACE_WARN_ON(ret);
68667 + if (ret)
68668 + return 0;
68669 +
68670 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68671 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68672 if (ret) {
68673 ftrace_bug(ret, ip);
68674 - return 0;
68675 }
68676 - return 1;
68677 + return ret ? 0 : 1;
68678 }
68679
68680 /*
68681 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68682
68683 int
68684 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68685 - void *data)
68686 + void *data)
68687 {
68688 struct ftrace_func_probe *entry;
68689 struct ftrace_page *pg;
68690 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68691 index f2bd275..adaf3a2 100644
68692 --- a/kernel/trace/trace.c
68693 +++ b/kernel/trace/trace.c
68694 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68695 };
68696 #endif
68697
68698 -static struct dentry *d_tracer;
68699 -
68700 struct dentry *tracing_init_dentry(void)
68701 {
68702 + static struct dentry *d_tracer;
68703 static int once;
68704
68705 if (d_tracer)
68706 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
68707 return d_tracer;
68708 }
68709
68710 -static struct dentry *d_percpu;
68711 -
68712 struct dentry *tracing_dentry_percpu(void)
68713 {
68714 + static struct dentry *d_percpu;
68715 static int once;
68716 struct dentry *d_tracer;
68717
68718 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68719 index c212a7f..7b02394 100644
68720 --- a/kernel/trace/trace_events.c
68721 +++ b/kernel/trace/trace_events.c
68722 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
68723 struct ftrace_module_file_ops {
68724 struct list_head list;
68725 struct module *mod;
68726 - struct file_operations id;
68727 - struct file_operations enable;
68728 - struct file_operations format;
68729 - struct file_operations filter;
68730 };
68731
68732 static struct ftrace_module_file_ops *
68733 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
68734
68735 file_ops->mod = mod;
68736
68737 - file_ops->id = ftrace_event_id_fops;
68738 - file_ops->id.owner = mod;
68739 -
68740 - file_ops->enable = ftrace_enable_fops;
68741 - file_ops->enable.owner = mod;
68742 -
68743 - file_ops->filter = ftrace_event_filter_fops;
68744 - file_ops->filter.owner = mod;
68745 -
68746 - file_ops->format = ftrace_event_format_fops;
68747 - file_ops->format.owner = mod;
68748 + pax_open_kernel();
68749 + *(void **)&mod->trace_id.owner = mod;
68750 + *(void **)&mod->trace_enable.owner = mod;
68751 + *(void **)&mod->trace_filter.owner = mod;
68752 + *(void **)&mod->trace_format.owner = mod;
68753 + pax_close_kernel();
68754
68755 list_add(&file_ops->list, &ftrace_module_file_list);
68756
68757 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
68758
68759 for_each_event(call, start, end) {
68760 __trace_add_event_call(*call, mod,
68761 - &file_ops->id, &file_ops->enable,
68762 - &file_ops->filter, &file_ops->format);
68763 + &mod->trace_id, &mod->trace_enable,
68764 + &mod->trace_filter, &mod->trace_format);
68765 }
68766 }
68767
68768 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68769 index 00d527c..7c5b1a3 100644
68770 --- a/kernel/trace/trace_kprobe.c
68771 +++ b/kernel/trace/trace_kprobe.c
68772 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68773 long ret;
68774 int maxlen = get_rloc_len(*(u32 *)dest);
68775 u8 *dst = get_rloc_data(dest);
68776 - u8 *src = addr;
68777 + const u8 __user *src = (const u8 __force_user *)addr;
68778 mm_segment_t old_fs = get_fs();
68779 if (!maxlen)
68780 return;
68781 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68782 pagefault_disable();
68783 do
68784 ret = __copy_from_user_inatomic(dst++, src++, 1);
68785 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68786 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68787 dst[-1] = '\0';
68788 pagefault_enable();
68789 set_fs(old_fs);
68790 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68791 ((u8 *)get_rloc_data(dest))[0] = '\0';
68792 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68793 } else
68794 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68795 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68796 get_rloc_offs(*(u32 *)dest));
68797 }
68798 /* Return the length of string -- including null terminal byte */
68799 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68800 set_fs(KERNEL_DS);
68801 pagefault_disable();
68802 do {
68803 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
68804 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
68805 len++;
68806 } while (c && ret == 0 && len < MAX_STRING_SIZE);
68807 pagefault_enable();
68808 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
68809 index fd3c8aa..5f324a6 100644
68810 --- a/kernel/trace/trace_mmiotrace.c
68811 +++ b/kernel/trace/trace_mmiotrace.c
68812 @@ -24,7 +24,7 @@ struct header_iter {
68813 static struct trace_array *mmio_trace_array;
68814 static bool overrun_detected;
68815 static unsigned long prev_overruns;
68816 -static atomic_t dropped_count;
68817 +static atomic_unchecked_t dropped_count;
68818
68819 static void mmio_reset_data(struct trace_array *tr)
68820 {
68821 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
68822
68823 static unsigned long count_overruns(struct trace_iterator *iter)
68824 {
68825 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
68826 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
68827 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
68828
68829 if (over > prev_overruns)
68830 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
68831 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
68832 sizeof(*entry), 0, pc);
68833 if (!event) {
68834 - atomic_inc(&dropped_count);
68835 + atomic_inc_unchecked(&dropped_count);
68836 return;
68837 }
68838 entry = ring_buffer_event_data(event);
68839 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
68840 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
68841 sizeof(*entry), 0, pc);
68842 if (!event) {
68843 - atomic_inc(&dropped_count);
68844 + atomic_inc_unchecked(&dropped_count);
68845 return;
68846 }
68847 entry = ring_buffer_event_data(event);
68848 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
68849 index 5199930..26c73a0 100644
68850 --- a/kernel/trace/trace_output.c
68851 +++ b/kernel/trace/trace_output.c
68852 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
68853
68854 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
68855 if (!IS_ERR(p)) {
68856 - p = mangle_path(s->buffer + s->len, p, "\n");
68857 + p = mangle_path(s->buffer + s->len, p, "\n\\");
68858 if (p) {
68859 s->len = p - s->buffer;
68860 return 1;
68861 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
68862 index 77575b3..6e623d1 100644
68863 --- a/kernel/trace/trace_stack.c
68864 +++ b/kernel/trace/trace_stack.c
68865 @@ -50,7 +50,7 @@ static inline void check_stack(void)
68866 return;
68867
68868 /* we do not handle interrupt stacks yet */
68869 - if (!object_is_on_stack(&this_size))
68870 + if (!object_starts_on_stack(&this_size))
68871 return;
68872
68873 local_irq_save(flags);
68874 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
68875 index 209b379..7f76423 100644
68876 --- a/kernel/trace/trace_workqueue.c
68877 +++ b/kernel/trace/trace_workqueue.c
68878 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
68879 int cpu;
68880 pid_t pid;
68881 /* Can be inserted from interrupt or user context, need to be atomic */
68882 - atomic_t inserted;
68883 + atomic_unchecked_t inserted;
68884 /*
68885 * Don't need to be atomic, works are serialized in a single workqueue thread
68886 * on a single CPU.
68887 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
68888 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
68889 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
68890 if (node->pid == wq_thread->pid) {
68891 - atomic_inc(&node->inserted);
68892 + atomic_inc_unchecked(&node->inserted);
68893 goto found;
68894 }
68895 }
68896 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
68897 tsk = get_pid_task(pid, PIDTYPE_PID);
68898 if (tsk) {
68899 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
68900 - atomic_read(&cws->inserted), cws->executed,
68901 + atomic_read_unchecked(&cws->inserted), cws->executed,
68902 tsk->comm);
68903 put_task_struct(tsk);
68904 }
68905 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
68906 index 82928f5..92da771 100644
68907 --- a/lib/Kconfig.debug
68908 +++ b/lib/Kconfig.debug
68909 @@ -1103,6 +1103,7 @@ config LATENCYTOP
68910 depends on DEBUG_KERNEL
68911 depends on STACKTRACE_SUPPORT
68912 depends on PROC_FS
68913 + depends on !GRKERNSEC_HIDESYM
68914 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
68915 select KALLSYMS
68916 select KALLSYMS_ALL
68917 diff --git a/lib/bitmap.c b/lib/bitmap.c
68918 index 0d4a127..33a06c7 100644
68919 --- a/lib/bitmap.c
68920 +++ b/lib/bitmap.c
68921 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
68922 {
68923 int c, old_c, totaldigits, ndigits, nchunks, nbits;
68924 u32 chunk;
68925 - const char __user __force *ubuf = (const char __user __force *)buf;
68926 + const char __user *ubuf = (const char __force_user *)buf;
68927
68928 bitmap_zero(maskp, nmaskbits);
68929
68930 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
68931 {
68932 if (!access_ok(VERIFY_READ, ubuf, ulen))
68933 return -EFAULT;
68934 - return __bitmap_parse((const char __force *)ubuf,
68935 + return __bitmap_parse((const char __force_kernel *)ubuf,
68936 ulen, 1, maskp, nmaskbits);
68937
68938 }
68939 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
68940 {
68941 unsigned a, b;
68942 int c, old_c, totaldigits;
68943 - const char __user __force *ubuf = (const char __user __force *)buf;
68944 + const char __user *ubuf = (const char __force_user *)buf;
68945 int exp_digit, in_range;
68946
68947 totaldigits = c = 0;
68948 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
68949 {
68950 if (!access_ok(VERIFY_READ, ubuf, ulen))
68951 return -EFAULT;
68952 - return __bitmap_parselist((const char __force *)ubuf,
68953 + return __bitmap_parselist((const char __force_kernel *)ubuf,
68954 ulen, 1, maskp, nmaskbits);
68955 }
68956 EXPORT_SYMBOL(bitmap_parselist_user);
68957 diff --git a/lib/bug.c b/lib/bug.c
68958 index 1955209..cbbb2ad 100644
68959 --- a/lib/bug.c
68960 +++ b/lib/bug.c
68961 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
68962 return BUG_TRAP_TYPE_NONE;
68963
68964 bug = find_bug(bugaddr);
68965 + if (!bug)
68966 + return BUG_TRAP_TYPE_NONE;
68967
68968 file = NULL;
68969 line = 0;
68970 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
68971 index a78b7c6..2c73084 100644
68972 --- a/lib/debugobjects.c
68973 +++ b/lib/debugobjects.c
68974 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
68975 if (limit > 4)
68976 return;
68977
68978 - is_on_stack = object_is_on_stack(addr);
68979 + is_on_stack = object_starts_on_stack(addr);
68980 if (is_on_stack == onstack)
68981 return;
68982
68983 diff --git a/lib/devres.c b/lib/devres.c
68984 index 7c0e953..f642b5c 100644
68985 --- a/lib/devres.c
68986 +++ b/lib/devres.c
68987 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
68988 void devm_iounmap(struct device *dev, void __iomem *addr)
68989 {
68990 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
68991 - (void *)addr));
68992 + (void __force *)addr));
68993 iounmap(addr);
68994 }
68995 EXPORT_SYMBOL(devm_iounmap);
68996 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
68997 {
68998 ioport_unmap(addr);
68999 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69000 - devm_ioport_map_match, (void *)addr));
69001 + devm_ioport_map_match, (void __force *)addr));
69002 }
69003 EXPORT_SYMBOL(devm_ioport_unmap);
69004
69005 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69006 index fea790a..ebb0e82 100644
69007 --- a/lib/dma-debug.c
69008 +++ b/lib/dma-debug.c
69009 @@ -925,7 +925,7 @@ out:
69010
69011 static void check_for_stack(struct device *dev, void *addr)
69012 {
69013 - if (object_is_on_stack(addr))
69014 + if (object_starts_on_stack(addr))
69015 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69016 "stack [addr=%p]\n", addr);
69017 }
69018 diff --git a/lib/extable.c b/lib/extable.c
69019 index 4cac81e..63e9b8f 100644
69020 --- a/lib/extable.c
69021 +++ b/lib/extable.c
69022 @@ -13,6 +13,7 @@
69023 #include <linux/init.h>
69024 #include <linux/sort.h>
69025 #include <asm/uaccess.h>
69026 +#include <asm/pgtable.h>
69027
69028 #ifndef ARCH_HAS_SORT_EXTABLE
69029 /*
69030 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69031 void sort_extable(struct exception_table_entry *start,
69032 struct exception_table_entry *finish)
69033 {
69034 + pax_open_kernel();
69035 sort(start, finish - start, sizeof(struct exception_table_entry),
69036 cmp_ex, NULL);
69037 + pax_close_kernel();
69038 }
69039
69040 #ifdef CONFIG_MODULES
69041 diff --git a/lib/inflate.c b/lib/inflate.c
69042 index 013a761..c28f3fc 100644
69043 --- a/lib/inflate.c
69044 +++ b/lib/inflate.c
69045 @@ -269,7 +269,7 @@ static void free(void *where)
69046 malloc_ptr = free_mem_ptr;
69047 }
69048 #else
69049 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69050 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69051 #define free(a) kfree(a)
69052 #endif
69053
69054 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69055 index bd2bea9..6b3c95e 100644
69056 --- a/lib/is_single_threaded.c
69057 +++ b/lib/is_single_threaded.c
69058 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69059 struct task_struct *p, *t;
69060 bool ret;
69061
69062 + if (!mm)
69063 + return true;
69064 +
69065 if (atomic_read(&task->signal->live) != 1)
69066 return false;
69067
69068 diff --git a/lib/kref.c b/lib/kref.c
69069 index 3efb882..8492f4c 100644
69070 --- a/lib/kref.c
69071 +++ b/lib/kref.c
69072 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
69073 */
69074 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
69075 {
69076 - WARN_ON(release == NULL);
69077 + BUG_ON(release == NULL);
69078 WARN_ON(release == (void (*)(struct kref *))kfree);
69079
69080 if (atomic_dec_and_test(&kref->refcount)) {
69081 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69082 index d9df745..e73c2fe 100644
69083 --- a/lib/radix-tree.c
69084 +++ b/lib/radix-tree.c
69085 @@ -80,7 +80,7 @@ struct radix_tree_preload {
69086 int nr;
69087 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69088 };
69089 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69090 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69091
69092 static inline void *ptr_to_indirect(void *ptr)
69093 {
69094 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69095 index 993599e..f1dbc14 100644
69096 --- a/lib/vsprintf.c
69097 +++ b/lib/vsprintf.c
69098 @@ -16,6 +16,9 @@
69099 * - scnprintf and vscnprintf
69100 */
69101
69102 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69103 +#define __INCLUDED_BY_HIDESYM 1
69104 +#endif
69105 #include <stdarg.h>
69106 #include <linux/module.h>
69107 #include <linux/types.h>
69108 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69109 char sym[KSYM_SYMBOL_LEN];
69110 if (ext == 'B')
69111 sprint_backtrace(sym, value);
69112 - else if (ext != 'f' && ext != 's')
69113 + else if (ext != 'f' && ext != 's' && ext != 'a')
69114 sprint_symbol(sym, value);
69115 else
69116 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69117 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
69118 return string(buf, end, uuid, spec);
69119 }
69120
69121 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69122 +int kptr_restrict __read_mostly = 2;
69123 +#else
69124 int kptr_restrict __read_mostly;
69125 +#endif
69126
69127 /*
69128 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69129 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
69130 * - 'S' For symbolic direct pointers with offset
69131 * - 's' For symbolic direct pointers without offset
69132 * - 'B' For backtraced symbolic direct pointers with offset
69133 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69134 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69135 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69136 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69137 * - 'M' For a 6-byte MAC address, it prints the address in the
69138 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69139 {
69140 if (!ptr && *fmt != 'K') {
69141 /*
69142 - * Print (null) with the same width as a pointer so it makes
69143 + * Print (nil) with the same width as a pointer so it makes
69144 * tabular output look nice.
69145 */
69146 if (spec.field_width == -1)
69147 spec.field_width = 2 * sizeof(void *);
69148 - return string(buf, end, "(null)", spec);
69149 + return string(buf, end, "(nil)", spec);
69150 }
69151
69152 switch (*fmt) {
69153 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69154 /* Fallthrough */
69155 case 'S':
69156 case 's':
69157 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69158 + break;
69159 +#else
69160 + return symbol_string(buf, end, ptr, spec, *fmt);
69161 +#endif
69162 + case 'A':
69163 + case 'a':
69164 case 'B':
69165 return symbol_string(buf, end, ptr, spec, *fmt);
69166 case 'R':
69167 @@ -878,9 +894,15 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69168 case 'U':
69169 return uuid_string(buf, end, ptr, spec, fmt);
69170 case 'V':
69171 - return buf + vsnprintf(buf, end > buf ? end - buf : 0,
69172 - ((struct va_format *)ptr)->fmt,
69173 - *(((struct va_format *)ptr)->va));
69174 + {
69175 + va_list va;
69176 +
69177 + va_copy(va, *((struct va_format *)ptr)->va);
69178 + buf += vsnprintf(buf, end > buf ? end - buf : 0,
69179 + ((struct va_format *)ptr)->fmt, va);
69180 + va_end(va);
69181 + return buf;
69182 + }
69183 case 'K':
69184 /*
69185 * %pK cannot be used in IRQ context because its test
69186 @@ -1608,11 +1630,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69187 typeof(type) value; \
69188 if (sizeof(type) == 8) { \
69189 args = PTR_ALIGN(args, sizeof(u32)); \
69190 - *(u32 *)&value = *(u32 *)args; \
69191 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69192 + *(u32 *)&value = *(const u32 *)args; \
69193 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69194 } else { \
69195 args = PTR_ALIGN(args, sizeof(type)); \
69196 - value = *(typeof(type) *)args; \
69197 + value = *(const typeof(type) *)args; \
69198 } \
69199 args += sizeof(type); \
69200 value; \
69201 @@ -1675,7 +1697,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69202 case FORMAT_TYPE_STR: {
69203 const char *str_arg = args;
69204 args += strlen(str_arg) + 1;
69205 - str = string(str, end, (char *)str_arg, spec);
69206 + str = string(str, end, str_arg, spec);
69207 break;
69208 }
69209
69210 diff --git a/localversion-grsec b/localversion-grsec
69211 new file mode 100644
69212 index 0000000..7cd6065
69213 --- /dev/null
69214 +++ b/localversion-grsec
69215 @@ -0,0 +1 @@
69216 +-grsec
69217 diff --git a/mm/Kconfig b/mm/Kconfig
69218 index 011b110..b492af2 100644
69219 --- a/mm/Kconfig
69220 +++ b/mm/Kconfig
69221 @@ -241,10 +241,10 @@ config KSM
69222 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69223
69224 config DEFAULT_MMAP_MIN_ADDR
69225 - int "Low address space to protect from user allocation"
69226 + int "Low address space to protect from user allocation"
69227 depends on MMU
69228 - default 4096
69229 - help
69230 + default 65536
69231 + help
69232 This is the portion of low virtual memory which should be protected
69233 from userspace allocation. Keeping a user from writing to low pages
69234 can help reduce the impact of kernel NULL pointer bugs.
69235 diff --git a/mm/filemap.c b/mm/filemap.c
69236 index 03c5b0e..a01e793 100644
69237 --- a/mm/filemap.c
69238 +++ b/mm/filemap.c
69239 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69240 struct address_space *mapping = file->f_mapping;
69241
69242 if (!mapping->a_ops->readpage)
69243 - return -ENOEXEC;
69244 + return -ENODEV;
69245 file_accessed(file);
69246 vma->vm_ops = &generic_file_vm_ops;
69247 vma->vm_flags |= VM_CAN_NONLINEAR;
69248 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69249 *pos = i_size_read(inode);
69250
69251 if (limit != RLIM_INFINITY) {
69252 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69253 if (*pos >= limit) {
69254 send_sig(SIGXFSZ, current, 0);
69255 return -EFBIG;
69256 diff --git a/mm/fremap.c b/mm/fremap.c
69257 index 9ed4fd4..c42648d 100644
69258 --- a/mm/fremap.c
69259 +++ b/mm/fremap.c
69260 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69261 retry:
69262 vma = find_vma(mm, start);
69263
69264 +#ifdef CONFIG_PAX_SEGMEXEC
69265 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69266 + goto out;
69267 +#endif
69268 +
69269 /*
69270 * Make sure the vma is shared, that it supports prefaulting,
69271 * and that the remapped range is valid and fully within
69272 diff --git a/mm/highmem.c b/mm/highmem.c
69273 index 57d82c6..e9e0552 100644
69274 --- a/mm/highmem.c
69275 +++ b/mm/highmem.c
69276 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69277 * So no dangers, even with speculative execution.
69278 */
69279 page = pte_page(pkmap_page_table[i]);
69280 + pax_open_kernel();
69281 pte_clear(&init_mm, (unsigned long)page_address(page),
69282 &pkmap_page_table[i]);
69283 -
69284 + pax_close_kernel();
69285 set_page_address(page, NULL);
69286 need_flush = 1;
69287 }
69288 @@ -186,9 +187,11 @@ start:
69289 }
69290 }
69291 vaddr = PKMAP_ADDR(last_pkmap_nr);
69292 +
69293 + pax_open_kernel();
69294 set_pte_at(&init_mm, vaddr,
69295 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69296 -
69297 + pax_close_kernel();
69298 pkmap_count[last_pkmap_nr] = 1;
69299 set_page_address(page, (void *)vaddr);
69300
69301 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69302 index 8f005e9..1cb1036 100644
69303 --- a/mm/huge_memory.c
69304 +++ b/mm/huge_memory.c
69305 @@ -704,7 +704,7 @@ out:
69306 * run pte_offset_map on the pmd, if an huge pmd could
69307 * materialize from under us from a different thread.
69308 */
69309 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69310 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69311 return VM_FAULT_OOM;
69312 /* if an huge pmd materialized from under us just retry later */
69313 if (unlikely(pmd_trans_huge(*pmd)))
69314 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69315 index 2316840..b418671 100644
69316 --- a/mm/hugetlb.c
69317 +++ b/mm/hugetlb.c
69318 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69319 return 1;
69320 }
69321
69322 +#ifdef CONFIG_PAX_SEGMEXEC
69323 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69324 +{
69325 + struct mm_struct *mm = vma->vm_mm;
69326 + struct vm_area_struct *vma_m;
69327 + unsigned long address_m;
69328 + pte_t *ptep_m;
69329 +
69330 + vma_m = pax_find_mirror_vma(vma);
69331 + if (!vma_m)
69332 + return;
69333 +
69334 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69335 + address_m = address + SEGMEXEC_TASK_SIZE;
69336 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69337 + get_page(page_m);
69338 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69339 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69340 +}
69341 +#endif
69342 +
69343 /*
69344 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69345 */
69346 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
69347 make_huge_pte(vma, new_page, 1));
69348 page_remove_rmap(old_page);
69349 hugepage_add_new_anon_rmap(new_page, vma, address);
69350 +
69351 +#ifdef CONFIG_PAX_SEGMEXEC
69352 + pax_mirror_huge_pte(vma, address, new_page);
69353 +#endif
69354 +
69355 /* Make the old page be freed below */
69356 new_page = old_page;
69357 mmu_notifier_invalidate_range_end(mm,
69358 @@ -2601,6 +2627,10 @@ retry:
69359 && (vma->vm_flags & VM_SHARED)));
69360 set_huge_pte_at(mm, address, ptep, new_pte);
69361
69362 +#ifdef CONFIG_PAX_SEGMEXEC
69363 + pax_mirror_huge_pte(vma, address, page);
69364 +#endif
69365 +
69366 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69367 /* Optimization, do the COW without a second fault */
69368 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69369 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69370 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69371 struct hstate *h = hstate_vma(vma);
69372
69373 +#ifdef CONFIG_PAX_SEGMEXEC
69374 + struct vm_area_struct *vma_m;
69375 +#endif
69376 +
69377 ptep = huge_pte_offset(mm, address);
69378 if (ptep) {
69379 entry = huge_ptep_get(ptep);
69380 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69381 VM_FAULT_SET_HINDEX(h - hstates);
69382 }
69383
69384 +#ifdef CONFIG_PAX_SEGMEXEC
69385 + vma_m = pax_find_mirror_vma(vma);
69386 + if (vma_m) {
69387 + unsigned long address_m;
69388 +
69389 + if (vma->vm_start > vma_m->vm_start) {
69390 + address_m = address;
69391 + address -= SEGMEXEC_TASK_SIZE;
69392 + vma = vma_m;
69393 + h = hstate_vma(vma);
69394 + } else
69395 + address_m = address + SEGMEXEC_TASK_SIZE;
69396 +
69397 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69398 + return VM_FAULT_OOM;
69399 + address_m &= HPAGE_MASK;
69400 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69401 + }
69402 +#endif
69403 +
69404 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69405 if (!ptep)
69406 return VM_FAULT_OOM;
69407 diff --git a/mm/internal.h b/mm/internal.h
69408 index 2189af4..f2ca332 100644
69409 --- a/mm/internal.h
69410 +++ b/mm/internal.h
69411 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69412 * in mm/page_alloc.c
69413 */
69414 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69415 +extern void free_compound_page(struct page *page);
69416 extern void prep_compound_page(struct page *page, unsigned long order);
69417 #ifdef CONFIG_MEMORY_FAILURE
69418 extern bool is_free_buddy_page(struct page *page);
69419 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69420 index f3b2a00..61da94d 100644
69421 --- a/mm/kmemleak.c
69422 +++ b/mm/kmemleak.c
69423 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
69424
69425 for (i = 0; i < object->trace_len; i++) {
69426 void *ptr = (void *)object->trace[i];
69427 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69428 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69429 }
69430 }
69431
69432 diff --git a/mm/maccess.c b/mm/maccess.c
69433 index d53adf9..03a24bf 100644
69434 --- a/mm/maccess.c
69435 +++ b/mm/maccess.c
69436 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69437 set_fs(KERNEL_DS);
69438 pagefault_disable();
69439 ret = __copy_from_user_inatomic(dst,
69440 - (__force const void __user *)src, size);
69441 + (const void __force_user *)src, size);
69442 pagefault_enable();
69443 set_fs(old_fs);
69444
69445 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69446
69447 set_fs(KERNEL_DS);
69448 pagefault_disable();
69449 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69450 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69451 pagefault_enable();
69452 set_fs(old_fs);
69453
69454 diff --git a/mm/madvise.c b/mm/madvise.c
69455 index 74bf193..feb6fd3 100644
69456 --- a/mm/madvise.c
69457 +++ b/mm/madvise.c
69458 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69459 pgoff_t pgoff;
69460 unsigned long new_flags = vma->vm_flags;
69461
69462 +#ifdef CONFIG_PAX_SEGMEXEC
69463 + struct vm_area_struct *vma_m;
69464 +#endif
69465 +
69466 switch (behavior) {
69467 case MADV_NORMAL:
69468 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69469 @@ -110,6 +114,13 @@ success:
69470 /*
69471 * vm_flags is protected by the mmap_sem held in write mode.
69472 */
69473 +
69474 +#ifdef CONFIG_PAX_SEGMEXEC
69475 + vma_m = pax_find_mirror_vma(vma);
69476 + if (vma_m)
69477 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69478 +#endif
69479 +
69480 vma->vm_flags = new_flags;
69481
69482 out:
69483 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69484 struct vm_area_struct ** prev,
69485 unsigned long start, unsigned long end)
69486 {
69487 +
69488 +#ifdef CONFIG_PAX_SEGMEXEC
69489 + struct vm_area_struct *vma_m;
69490 +#endif
69491 +
69492 *prev = vma;
69493 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69494 return -EINVAL;
69495 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69496 zap_page_range(vma, start, end - start, &details);
69497 } else
69498 zap_page_range(vma, start, end - start, NULL);
69499 +
69500 +#ifdef CONFIG_PAX_SEGMEXEC
69501 + vma_m = pax_find_mirror_vma(vma);
69502 + if (vma_m) {
69503 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69504 + struct zap_details details = {
69505 + .nonlinear_vma = vma_m,
69506 + .last_index = ULONG_MAX,
69507 + };
69508 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69509 + } else
69510 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69511 + }
69512 +#endif
69513 +
69514 return 0;
69515 }
69516
69517 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69518 if (end < start)
69519 goto out;
69520
69521 +#ifdef CONFIG_PAX_SEGMEXEC
69522 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69523 + if (end > SEGMEXEC_TASK_SIZE)
69524 + goto out;
69525 + } else
69526 +#endif
69527 +
69528 + if (end > TASK_SIZE)
69529 + goto out;
69530 +
69531 error = 0;
69532 if (end == start)
69533 goto out;
69534 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69535 index 06d3479..0778eef 100644
69536 --- a/mm/memory-failure.c
69537 +++ b/mm/memory-failure.c
69538 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69539
69540 int sysctl_memory_failure_recovery __read_mostly = 1;
69541
69542 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69543 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69544
69545 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69546
69547 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
69548 si.si_signo = SIGBUS;
69549 si.si_errno = 0;
69550 si.si_code = BUS_MCEERR_AO;
69551 - si.si_addr = (void *)addr;
69552 + si.si_addr = (void __user *)addr;
69553 #ifdef __ARCH_SI_TRAPNO
69554 si.si_trapno = trapno;
69555 #endif
69556 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69557 }
69558
69559 nr_pages = 1 << compound_trans_order(hpage);
69560 - atomic_long_add(nr_pages, &mce_bad_pages);
69561 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69562
69563 /*
69564 * We need/can do nothing about count=0 pages.
69565 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69566 if (!PageHWPoison(hpage)
69567 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69568 || (p != hpage && TestSetPageHWPoison(hpage))) {
69569 - atomic_long_sub(nr_pages, &mce_bad_pages);
69570 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69571 return 0;
69572 }
69573 set_page_hwpoison_huge_page(hpage);
69574 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69575 }
69576 if (hwpoison_filter(p)) {
69577 if (TestClearPageHWPoison(p))
69578 - atomic_long_sub(nr_pages, &mce_bad_pages);
69579 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69580 unlock_page(hpage);
69581 put_page(hpage);
69582 return 0;
69583 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
69584 return 0;
69585 }
69586 if (TestClearPageHWPoison(p))
69587 - atomic_long_sub(nr_pages, &mce_bad_pages);
69588 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69589 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69590 return 0;
69591 }
69592 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
69593 */
69594 if (TestClearPageHWPoison(page)) {
69595 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69596 - atomic_long_sub(nr_pages, &mce_bad_pages);
69597 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69598 freeit = 1;
69599 if (PageHuge(page))
69600 clear_page_hwpoison_huge_page(page);
69601 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69602 }
69603 done:
69604 if (!PageHWPoison(hpage))
69605 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69606 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69607 set_page_hwpoison_huge_page(hpage);
69608 dequeue_hwpoisoned_huge_page(hpage);
69609 /* keep elevated page count for bad page */
69610 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
69611 return ret;
69612
69613 done:
69614 - atomic_long_add(1, &mce_bad_pages);
69615 + atomic_long_add_unchecked(1, &mce_bad_pages);
69616 SetPageHWPoison(page);
69617 /* keep elevated page count for bad page */
69618 return ret;
69619 diff --git a/mm/memory.c b/mm/memory.c
69620 index 829d437..3d3926a 100644
69621 --- a/mm/memory.c
69622 +++ b/mm/memory.c
69623 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69624 return;
69625
69626 pmd = pmd_offset(pud, start);
69627 +
69628 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69629 pud_clear(pud);
69630 pmd_free_tlb(tlb, pmd, start);
69631 +#endif
69632 +
69633 }
69634
69635 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69636 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69637 if (end - 1 > ceiling - 1)
69638 return;
69639
69640 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69641 pud = pud_offset(pgd, start);
69642 pgd_clear(pgd);
69643 pud_free_tlb(tlb, pud, start);
69644 +#endif
69645 +
69646 }
69647
69648 /*
69649 @@ -1566,12 +1573,6 @@ no_page_table:
69650 return page;
69651 }
69652
69653 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69654 -{
69655 - return stack_guard_page_start(vma, addr) ||
69656 - stack_guard_page_end(vma, addr+PAGE_SIZE);
69657 -}
69658 -
69659 /**
69660 * __get_user_pages() - pin user pages in memory
69661 * @tsk: task_struct of target task
69662 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69663 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69664 i = 0;
69665
69666 - do {
69667 + while (nr_pages) {
69668 struct vm_area_struct *vma;
69669
69670 - vma = find_extend_vma(mm, start);
69671 + vma = find_vma(mm, start);
69672 if (!vma && in_gate_area(mm, start)) {
69673 unsigned long pg = start & PAGE_MASK;
69674 pgd_t *pgd;
69675 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69676 goto next_page;
69677 }
69678
69679 - if (!vma ||
69680 + if (!vma || start < vma->vm_start ||
69681 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69682 !(vm_flags & vma->vm_flags))
69683 return i ? : -EFAULT;
69684 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69685 int ret;
69686 unsigned int fault_flags = 0;
69687
69688 - /* For mlock, just skip the stack guard page. */
69689 - if (foll_flags & FOLL_MLOCK) {
69690 - if (stack_guard_page(vma, start))
69691 - goto next_page;
69692 - }
69693 if (foll_flags & FOLL_WRITE)
69694 fault_flags |= FAULT_FLAG_WRITE;
69695 if (nonblocking)
69696 @@ -1800,7 +1796,7 @@ next_page:
69697 start += PAGE_SIZE;
69698 nr_pages--;
69699 } while (nr_pages && start < vma->vm_end);
69700 - } while (nr_pages);
69701 + }
69702 return i;
69703 }
69704 EXPORT_SYMBOL(__get_user_pages);
69705 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69706 page_add_file_rmap(page);
69707 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69708
69709 +#ifdef CONFIG_PAX_SEGMEXEC
69710 + pax_mirror_file_pte(vma, addr, page, ptl);
69711 +#endif
69712 +
69713 retval = 0;
69714 pte_unmap_unlock(pte, ptl);
69715 return retval;
69716 @@ -2041,10 +2041,22 @@ out:
69717 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69718 struct page *page)
69719 {
69720 +
69721 +#ifdef CONFIG_PAX_SEGMEXEC
69722 + struct vm_area_struct *vma_m;
69723 +#endif
69724 +
69725 if (addr < vma->vm_start || addr >= vma->vm_end)
69726 return -EFAULT;
69727 if (!page_count(page))
69728 return -EINVAL;
69729 +
69730 +#ifdef CONFIG_PAX_SEGMEXEC
69731 + vma_m = pax_find_mirror_vma(vma);
69732 + if (vma_m)
69733 + vma_m->vm_flags |= VM_INSERTPAGE;
69734 +#endif
69735 +
69736 vma->vm_flags |= VM_INSERTPAGE;
69737 return insert_page(vma, addr, page, vma->vm_page_prot);
69738 }
69739 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69740 unsigned long pfn)
69741 {
69742 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69743 + BUG_ON(vma->vm_mirror);
69744
69745 if (addr < vma->vm_start || addr >= vma->vm_end)
69746 return -EFAULT;
69747 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69748 copy_user_highpage(dst, src, va, vma);
69749 }
69750
69751 +#ifdef CONFIG_PAX_SEGMEXEC
69752 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69753 +{
69754 + struct mm_struct *mm = vma->vm_mm;
69755 + spinlock_t *ptl;
69756 + pte_t *pte, entry;
69757 +
69758 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69759 + entry = *pte;
69760 + if (!pte_present(entry)) {
69761 + if (!pte_none(entry)) {
69762 + BUG_ON(pte_file(entry));
69763 + free_swap_and_cache(pte_to_swp_entry(entry));
69764 + pte_clear_not_present_full(mm, address, pte, 0);
69765 + }
69766 + } else {
69767 + struct page *page;
69768 +
69769 + flush_cache_page(vma, address, pte_pfn(entry));
69770 + entry = ptep_clear_flush(vma, address, pte);
69771 + BUG_ON(pte_dirty(entry));
69772 + page = vm_normal_page(vma, address, entry);
69773 + if (page) {
69774 + update_hiwater_rss(mm);
69775 + if (PageAnon(page))
69776 + dec_mm_counter_fast(mm, MM_ANONPAGES);
69777 + else
69778 + dec_mm_counter_fast(mm, MM_FILEPAGES);
69779 + page_remove_rmap(page);
69780 + page_cache_release(page);
69781 + }
69782 + }
69783 + pte_unmap_unlock(pte, ptl);
69784 +}
69785 +
69786 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
69787 + *
69788 + * the ptl of the lower mapped page is held on entry and is not released on exit
69789 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
69790 + */
69791 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69792 +{
69793 + struct mm_struct *mm = vma->vm_mm;
69794 + unsigned long address_m;
69795 + spinlock_t *ptl_m;
69796 + struct vm_area_struct *vma_m;
69797 + pmd_t *pmd_m;
69798 + pte_t *pte_m, entry_m;
69799 +
69800 + BUG_ON(!page_m || !PageAnon(page_m));
69801 +
69802 + vma_m = pax_find_mirror_vma(vma);
69803 + if (!vma_m)
69804 + return;
69805 +
69806 + BUG_ON(!PageLocked(page_m));
69807 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69808 + address_m = address + SEGMEXEC_TASK_SIZE;
69809 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69810 + pte_m = pte_offset_map(pmd_m, address_m);
69811 + ptl_m = pte_lockptr(mm, pmd_m);
69812 + if (ptl != ptl_m) {
69813 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69814 + if (!pte_none(*pte_m))
69815 + goto out;
69816 + }
69817 +
69818 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69819 + page_cache_get(page_m);
69820 + page_add_anon_rmap(page_m, vma_m, address_m);
69821 + inc_mm_counter_fast(mm, MM_ANONPAGES);
69822 + set_pte_at(mm, address_m, pte_m, entry_m);
69823 + update_mmu_cache(vma_m, address_m, entry_m);
69824 +out:
69825 + if (ptl != ptl_m)
69826 + spin_unlock(ptl_m);
69827 + pte_unmap(pte_m);
69828 + unlock_page(page_m);
69829 +}
69830 +
69831 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69832 +{
69833 + struct mm_struct *mm = vma->vm_mm;
69834 + unsigned long address_m;
69835 + spinlock_t *ptl_m;
69836 + struct vm_area_struct *vma_m;
69837 + pmd_t *pmd_m;
69838 + pte_t *pte_m, entry_m;
69839 +
69840 + BUG_ON(!page_m || PageAnon(page_m));
69841 +
69842 + vma_m = pax_find_mirror_vma(vma);
69843 + if (!vma_m)
69844 + return;
69845 +
69846 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69847 + address_m = address + SEGMEXEC_TASK_SIZE;
69848 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69849 + pte_m = pte_offset_map(pmd_m, address_m);
69850 + ptl_m = pte_lockptr(mm, pmd_m);
69851 + if (ptl != ptl_m) {
69852 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69853 + if (!pte_none(*pte_m))
69854 + goto out;
69855 + }
69856 +
69857 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69858 + page_cache_get(page_m);
69859 + page_add_file_rmap(page_m);
69860 + inc_mm_counter_fast(mm, MM_FILEPAGES);
69861 + set_pte_at(mm, address_m, pte_m, entry_m);
69862 + update_mmu_cache(vma_m, address_m, entry_m);
69863 +out:
69864 + if (ptl != ptl_m)
69865 + spin_unlock(ptl_m);
69866 + pte_unmap(pte_m);
69867 +}
69868 +
69869 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
69870 +{
69871 + struct mm_struct *mm = vma->vm_mm;
69872 + unsigned long address_m;
69873 + spinlock_t *ptl_m;
69874 + struct vm_area_struct *vma_m;
69875 + pmd_t *pmd_m;
69876 + pte_t *pte_m, entry_m;
69877 +
69878 + vma_m = pax_find_mirror_vma(vma);
69879 + if (!vma_m)
69880 + return;
69881 +
69882 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69883 + address_m = address + SEGMEXEC_TASK_SIZE;
69884 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69885 + pte_m = pte_offset_map(pmd_m, address_m);
69886 + ptl_m = pte_lockptr(mm, pmd_m);
69887 + if (ptl != ptl_m) {
69888 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69889 + if (!pte_none(*pte_m))
69890 + goto out;
69891 + }
69892 +
69893 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
69894 + set_pte_at(mm, address_m, pte_m, entry_m);
69895 +out:
69896 + if (ptl != ptl_m)
69897 + spin_unlock(ptl_m);
69898 + pte_unmap(pte_m);
69899 +}
69900 +
69901 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
69902 +{
69903 + struct page *page_m;
69904 + pte_t entry;
69905 +
69906 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
69907 + goto out;
69908 +
69909 + entry = *pte;
69910 + page_m = vm_normal_page(vma, address, entry);
69911 + if (!page_m)
69912 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
69913 + else if (PageAnon(page_m)) {
69914 + if (pax_find_mirror_vma(vma)) {
69915 + pte_unmap_unlock(pte, ptl);
69916 + lock_page(page_m);
69917 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
69918 + if (pte_same(entry, *pte))
69919 + pax_mirror_anon_pte(vma, address, page_m, ptl);
69920 + else
69921 + unlock_page(page_m);
69922 + }
69923 + } else
69924 + pax_mirror_file_pte(vma, address, page_m, ptl);
69925 +
69926 +out:
69927 + pte_unmap_unlock(pte, ptl);
69928 +}
69929 +#endif
69930 +
69931 /*
69932 * This routine handles present pages, when users try to write
69933 * to a shared page. It is done by copying the page to a new address
69934 @@ -2656,6 +2849,12 @@ gotten:
69935 */
69936 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
69937 if (likely(pte_same(*page_table, orig_pte))) {
69938 +
69939 +#ifdef CONFIG_PAX_SEGMEXEC
69940 + if (pax_find_mirror_vma(vma))
69941 + BUG_ON(!trylock_page(new_page));
69942 +#endif
69943 +
69944 if (old_page) {
69945 if (!PageAnon(old_page)) {
69946 dec_mm_counter_fast(mm, MM_FILEPAGES);
69947 @@ -2707,6 +2906,10 @@ gotten:
69948 page_remove_rmap(old_page);
69949 }
69950
69951 +#ifdef CONFIG_PAX_SEGMEXEC
69952 + pax_mirror_anon_pte(vma, address, new_page, ptl);
69953 +#endif
69954 +
69955 /* Free the old page.. */
69956 new_page = old_page;
69957 ret |= VM_FAULT_WRITE;
69958 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
69959 swap_free(entry);
69960 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
69961 try_to_free_swap(page);
69962 +
69963 +#ifdef CONFIG_PAX_SEGMEXEC
69964 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
69965 +#endif
69966 +
69967 unlock_page(page);
69968 if (swapcache) {
69969 /*
69970 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
69971
69972 /* No need to invalidate - it was non-present before */
69973 update_mmu_cache(vma, address, page_table);
69974 +
69975 +#ifdef CONFIG_PAX_SEGMEXEC
69976 + pax_mirror_anon_pte(vma, address, page, ptl);
69977 +#endif
69978 +
69979 unlock:
69980 pte_unmap_unlock(page_table, ptl);
69981 out:
69982 @@ -3028,40 +3241,6 @@ out_release:
69983 }
69984
69985 /*
69986 - * This is like a special single-page "expand_{down|up}wards()",
69987 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
69988 - * doesn't hit another vma.
69989 - */
69990 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
69991 -{
69992 - address &= PAGE_MASK;
69993 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
69994 - struct vm_area_struct *prev = vma->vm_prev;
69995 -
69996 - /*
69997 - * Is there a mapping abutting this one below?
69998 - *
69999 - * That's only ok if it's the same stack mapping
70000 - * that has gotten split..
70001 - */
70002 - if (prev && prev->vm_end == address)
70003 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70004 -
70005 - expand_downwards(vma, address - PAGE_SIZE);
70006 - }
70007 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70008 - struct vm_area_struct *next = vma->vm_next;
70009 -
70010 - /* As VM_GROWSDOWN but s/below/above/ */
70011 - if (next && next->vm_start == address + PAGE_SIZE)
70012 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70013 -
70014 - expand_upwards(vma, address + PAGE_SIZE);
70015 - }
70016 - return 0;
70017 -}
70018 -
70019 -/*
70020 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70021 * but allow concurrent faults), and pte mapped but not yet locked.
70022 * We return with mmap_sem still held, but pte unmapped and unlocked.
70023 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70024 unsigned long address, pte_t *page_table, pmd_t *pmd,
70025 unsigned int flags)
70026 {
70027 - struct page *page;
70028 + struct page *page = NULL;
70029 spinlock_t *ptl;
70030 pte_t entry;
70031
70032 - pte_unmap(page_table);
70033 -
70034 - /* Check if we need to add a guard page to the stack */
70035 - if (check_stack_guard_page(vma, address) < 0)
70036 - return VM_FAULT_SIGBUS;
70037 -
70038 - /* Use the zero-page for reads */
70039 if (!(flags & FAULT_FLAG_WRITE)) {
70040 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70041 vma->vm_page_prot));
70042 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70043 + ptl = pte_lockptr(mm, pmd);
70044 + spin_lock(ptl);
70045 if (!pte_none(*page_table))
70046 goto unlock;
70047 goto setpte;
70048 }
70049
70050 /* Allocate our own private page. */
70051 + pte_unmap(page_table);
70052 +
70053 if (unlikely(anon_vma_prepare(vma)))
70054 goto oom;
70055 page = alloc_zeroed_user_highpage_movable(vma, address);
70056 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70057 if (!pte_none(*page_table))
70058 goto release;
70059
70060 +#ifdef CONFIG_PAX_SEGMEXEC
70061 + if (pax_find_mirror_vma(vma))
70062 + BUG_ON(!trylock_page(page));
70063 +#endif
70064 +
70065 inc_mm_counter_fast(mm, MM_ANONPAGES);
70066 page_add_new_anon_rmap(page, vma, address);
70067 setpte:
70068 @@ -3116,6 +3296,12 @@ setpte:
70069
70070 /* No need to invalidate - it was non-present before */
70071 update_mmu_cache(vma, address, page_table);
70072 +
70073 +#ifdef CONFIG_PAX_SEGMEXEC
70074 + if (page)
70075 + pax_mirror_anon_pte(vma, address, page, ptl);
70076 +#endif
70077 +
70078 unlock:
70079 pte_unmap_unlock(page_table, ptl);
70080 return 0;
70081 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70082 */
70083 /* Only go through if we didn't race with anybody else... */
70084 if (likely(pte_same(*page_table, orig_pte))) {
70085 +
70086 +#ifdef CONFIG_PAX_SEGMEXEC
70087 + if (anon && pax_find_mirror_vma(vma))
70088 + BUG_ON(!trylock_page(page));
70089 +#endif
70090 +
70091 flush_icache_page(vma, page);
70092 entry = mk_pte(page, vma->vm_page_prot);
70093 if (flags & FAULT_FLAG_WRITE)
70094 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70095
70096 /* no need to invalidate: a not-present page won't be cached */
70097 update_mmu_cache(vma, address, page_table);
70098 +
70099 +#ifdef CONFIG_PAX_SEGMEXEC
70100 + if (anon)
70101 + pax_mirror_anon_pte(vma, address, page, ptl);
70102 + else
70103 + pax_mirror_file_pte(vma, address, page, ptl);
70104 +#endif
70105 +
70106 } else {
70107 if (cow_page)
70108 mem_cgroup_uncharge_page(cow_page);
70109 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
70110 if (flags & FAULT_FLAG_WRITE)
70111 flush_tlb_fix_spurious_fault(vma, address);
70112 }
70113 +
70114 +#ifdef CONFIG_PAX_SEGMEXEC
70115 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70116 + return 0;
70117 +#endif
70118 +
70119 unlock:
70120 pte_unmap_unlock(pte, ptl);
70121 return 0;
70122 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70123 pmd_t *pmd;
70124 pte_t *pte;
70125
70126 +#ifdef CONFIG_PAX_SEGMEXEC
70127 + struct vm_area_struct *vma_m;
70128 +#endif
70129 +
70130 __set_current_state(TASK_RUNNING);
70131
70132 count_vm_event(PGFAULT);
70133 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70134 if (unlikely(is_vm_hugetlb_page(vma)))
70135 return hugetlb_fault(mm, vma, address, flags);
70136
70137 +#ifdef CONFIG_PAX_SEGMEXEC
70138 + vma_m = pax_find_mirror_vma(vma);
70139 + if (vma_m) {
70140 + unsigned long address_m;
70141 + pgd_t *pgd_m;
70142 + pud_t *pud_m;
70143 + pmd_t *pmd_m;
70144 +
70145 + if (vma->vm_start > vma_m->vm_start) {
70146 + address_m = address;
70147 + address -= SEGMEXEC_TASK_SIZE;
70148 + vma = vma_m;
70149 + } else
70150 + address_m = address + SEGMEXEC_TASK_SIZE;
70151 +
70152 + pgd_m = pgd_offset(mm, address_m);
70153 + pud_m = pud_alloc(mm, pgd_m, address_m);
70154 + if (!pud_m)
70155 + return VM_FAULT_OOM;
70156 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70157 + if (!pmd_m)
70158 + return VM_FAULT_OOM;
70159 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70160 + return VM_FAULT_OOM;
70161 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70162 + }
70163 +#endif
70164 +
70165 pgd = pgd_offset(mm, address);
70166 pud = pud_alloc(mm, pgd, address);
70167 if (!pud)
70168 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70169 * run pte_offset_map on the pmd, if an huge pmd could
70170 * materialize from under us from a different thread.
70171 */
70172 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70173 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70174 return VM_FAULT_OOM;
70175 /* if an huge pmd materialized from under us just retry later */
70176 if (unlikely(pmd_trans_huge(*pmd)))
70177 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
70178 gate_vma.vm_start = FIXADDR_USER_START;
70179 gate_vma.vm_end = FIXADDR_USER_END;
70180 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70181 - gate_vma.vm_page_prot = __P101;
70182 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70183 /*
70184 * Make sure the vDSO gets into every core dump.
70185 * Dumping its contents makes post-mortem fully interpretable later
70186 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70187 index c3fdbcb..2e8ef90 100644
70188 --- a/mm/mempolicy.c
70189 +++ b/mm/mempolicy.c
70190 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70191 unsigned long vmstart;
70192 unsigned long vmend;
70193
70194 +#ifdef CONFIG_PAX_SEGMEXEC
70195 + struct vm_area_struct *vma_m;
70196 +#endif
70197 +
70198 vma = find_vma_prev(mm, start, &prev);
70199 if (!vma || vma->vm_start > start)
70200 return -EFAULT;
70201 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70202 err = policy_vma(vma, new_pol);
70203 if (err)
70204 goto out;
70205 +
70206 +#ifdef CONFIG_PAX_SEGMEXEC
70207 + vma_m = pax_find_mirror_vma(vma);
70208 + if (vma_m) {
70209 + err = policy_vma(vma_m, new_pol);
70210 + if (err)
70211 + goto out;
70212 + }
70213 +#endif
70214 +
70215 }
70216
70217 out:
70218 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70219
70220 if (end < start)
70221 return -EINVAL;
70222 +
70223 +#ifdef CONFIG_PAX_SEGMEXEC
70224 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70225 + if (end > SEGMEXEC_TASK_SIZE)
70226 + return -EINVAL;
70227 + } else
70228 +#endif
70229 +
70230 + if (end > TASK_SIZE)
70231 + return -EINVAL;
70232 +
70233 if (end == start)
70234 return 0;
70235
70236 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70237 if (!mm)
70238 goto out;
70239
70240 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70241 + if (mm != current->mm &&
70242 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70243 + err = -EPERM;
70244 + goto out;
70245 + }
70246 +#endif
70247 +
70248 /*
70249 * Check if this process has the right to modify the specified
70250 * process. The right exists if the process has administrative
70251 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70252 rcu_read_lock();
70253 tcred = __task_cred(task);
70254 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70255 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70256 - !capable(CAP_SYS_NICE)) {
70257 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70258 rcu_read_unlock();
70259 err = -EPERM;
70260 goto out;
70261 diff --git a/mm/migrate.c b/mm/migrate.c
70262 index 177aca4..ab3a744 100644
70263 --- a/mm/migrate.c
70264 +++ b/mm/migrate.c
70265 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70266 if (!mm)
70267 return -EINVAL;
70268
70269 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70270 + if (mm != current->mm &&
70271 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70272 + err = -EPERM;
70273 + goto out;
70274 + }
70275 +#endif
70276 +
70277 /*
70278 * Check if this process has the right to modify the specified
70279 * process. The right exists if the process has administrative
70280 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70281 rcu_read_lock();
70282 tcred = __task_cred(task);
70283 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70284 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70285 - !capable(CAP_SYS_NICE)) {
70286 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70287 rcu_read_unlock();
70288 err = -EPERM;
70289 goto out;
70290 diff --git a/mm/mlock.c b/mm/mlock.c
70291 index 4f4f53b..9511904 100644
70292 --- a/mm/mlock.c
70293 +++ b/mm/mlock.c
70294 @@ -13,6 +13,7 @@
70295 #include <linux/pagemap.h>
70296 #include <linux/mempolicy.h>
70297 #include <linux/syscalls.h>
70298 +#include <linux/security.h>
70299 #include <linux/sched.h>
70300 #include <linux/export.h>
70301 #include <linux/rmap.h>
70302 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70303 return -EINVAL;
70304 if (end == start)
70305 return 0;
70306 + if (end > TASK_SIZE)
70307 + return -EINVAL;
70308 +
70309 vma = find_vma_prev(current->mm, start, &prev);
70310 if (!vma || vma->vm_start > start)
70311 return -ENOMEM;
70312 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70313 for (nstart = start ; ; ) {
70314 vm_flags_t newflags;
70315
70316 +#ifdef CONFIG_PAX_SEGMEXEC
70317 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70318 + break;
70319 +#endif
70320 +
70321 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70322
70323 newflags = vma->vm_flags | VM_LOCKED;
70324 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70325 lock_limit >>= PAGE_SHIFT;
70326
70327 /* check against resource limits */
70328 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70329 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70330 error = do_mlock(start, len, 1);
70331 up_write(&current->mm->mmap_sem);
70332 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70333 static int do_mlockall(int flags)
70334 {
70335 struct vm_area_struct * vma, * prev = NULL;
70336 - unsigned int def_flags = 0;
70337
70338 if (flags & MCL_FUTURE)
70339 - def_flags = VM_LOCKED;
70340 - current->mm->def_flags = def_flags;
70341 + current->mm->def_flags |= VM_LOCKED;
70342 + else
70343 + current->mm->def_flags &= ~VM_LOCKED;
70344 if (flags == MCL_FUTURE)
70345 goto out;
70346
70347 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70348 vm_flags_t newflags;
70349
70350 +#ifdef CONFIG_PAX_SEGMEXEC
70351 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70352 + break;
70353 +#endif
70354 +
70355 + BUG_ON(vma->vm_end > TASK_SIZE);
70356 newflags = vma->vm_flags | VM_LOCKED;
70357 if (!(flags & MCL_CURRENT))
70358 newflags &= ~VM_LOCKED;
70359 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70360 lock_limit >>= PAGE_SHIFT;
70361
70362 ret = -ENOMEM;
70363 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70364 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70365 capable(CAP_IPC_LOCK))
70366 ret = do_mlockall(flags);
70367 diff --git a/mm/mmap.c b/mm/mmap.c
70368 index eae90af..c930262 100644
70369 --- a/mm/mmap.c
70370 +++ b/mm/mmap.c
70371 @@ -46,6 +46,16 @@
70372 #define arch_rebalance_pgtables(addr, len) (addr)
70373 #endif
70374
70375 +static inline void verify_mm_writelocked(struct mm_struct *mm)
70376 +{
70377 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70378 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70379 + up_read(&mm->mmap_sem);
70380 + BUG();
70381 + }
70382 +#endif
70383 +}
70384 +
70385 static void unmap_region(struct mm_struct *mm,
70386 struct vm_area_struct *vma, struct vm_area_struct *prev,
70387 unsigned long start, unsigned long end);
70388 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70389 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70390 *
70391 */
70392 -pgprot_t protection_map[16] = {
70393 +pgprot_t protection_map[16] __read_only = {
70394 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70395 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70396 };
70397
70398 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
70399 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70400 {
70401 - return __pgprot(pgprot_val(protection_map[vm_flags &
70402 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70403 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70404 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70405 +
70406 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70407 + if (!(__supported_pte_mask & _PAGE_NX) &&
70408 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70409 + (vm_flags & (VM_READ | VM_WRITE)))
70410 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70411 +#endif
70412 +
70413 + return prot;
70414 }
70415 EXPORT_SYMBOL(vm_get_page_prot);
70416
70417 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70418 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70419 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70420 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70421 /*
70422 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70423 * other variables. It can be updated by several CPUs frequently.
70424 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70425 struct vm_area_struct *next = vma->vm_next;
70426
70427 might_sleep();
70428 + BUG_ON(vma->vm_mirror);
70429 if (vma->vm_ops && vma->vm_ops->close)
70430 vma->vm_ops->close(vma);
70431 if (vma->vm_file) {
70432 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70433 * not page aligned -Ram Gupta
70434 */
70435 rlim = rlimit(RLIMIT_DATA);
70436 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70437 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70438 (mm->end_data - mm->start_data) > rlim)
70439 goto out;
70440 @@ -689,6 +711,12 @@ static int
70441 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70442 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70443 {
70444 +
70445 +#ifdef CONFIG_PAX_SEGMEXEC
70446 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70447 + return 0;
70448 +#endif
70449 +
70450 if (is_mergeable_vma(vma, file, vm_flags) &&
70451 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70452 if (vma->vm_pgoff == vm_pgoff)
70453 @@ -708,6 +736,12 @@ static int
70454 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70455 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70456 {
70457 +
70458 +#ifdef CONFIG_PAX_SEGMEXEC
70459 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70460 + return 0;
70461 +#endif
70462 +
70463 if (is_mergeable_vma(vma, file, vm_flags) &&
70464 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70465 pgoff_t vm_pglen;
70466 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70467 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70468 struct vm_area_struct *prev, unsigned long addr,
70469 unsigned long end, unsigned long vm_flags,
70470 - struct anon_vma *anon_vma, struct file *file,
70471 + struct anon_vma *anon_vma, struct file *file,
70472 pgoff_t pgoff, struct mempolicy *policy)
70473 {
70474 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70475 struct vm_area_struct *area, *next;
70476 int err;
70477
70478 +#ifdef CONFIG_PAX_SEGMEXEC
70479 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70480 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70481 +
70482 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70483 +#endif
70484 +
70485 /*
70486 * We later require that vma->vm_flags == vm_flags,
70487 * so this tests vma->vm_flags & VM_SPECIAL, too.
70488 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70489 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70490 next = next->vm_next;
70491
70492 +#ifdef CONFIG_PAX_SEGMEXEC
70493 + if (prev)
70494 + prev_m = pax_find_mirror_vma(prev);
70495 + if (area)
70496 + area_m = pax_find_mirror_vma(area);
70497 + if (next)
70498 + next_m = pax_find_mirror_vma(next);
70499 +#endif
70500 +
70501 /*
70502 * Can it merge with the predecessor?
70503 */
70504 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70505 /* cases 1, 6 */
70506 err = vma_adjust(prev, prev->vm_start,
70507 next->vm_end, prev->vm_pgoff, NULL);
70508 - } else /* cases 2, 5, 7 */
70509 +
70510 +#ifdef CONFIG_PAX_SEGMEXEC
70511 + if (!err && prev_m)
70512 + err = vma_adjust(prev_m, prev_m->vm_start,
70513 + next_m->vm_end, prev_m->vm_pgoff, NULL);
70514 +#endif
70515 +
70516 + } else { /* cases 2, 5, 7 */
70517 err = vma_adjust(prev, prev->vm_start,
70518 end, prev->vm_pgoff, NULL);
70519 +
70520 +#ifdef CONFIG_PAX_SEGMEXEC
70521 + if (!err && prev_m)
70522 + err = vma_adjust(prev_m, prev_m->vm_start,
70523 + end_m, prev_m->vm_pgoff, NULL);
70524 +#endif
70525 +
70526 + }
70527 if (err)
70528 return NULL;
70529 khugepaged_enter_vma_merge(prev);
70530 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70531 mpol_equal(policy, vma_policy(next)) &&
70532 can_vma_merge_before(next, vm_flags,
70533 anon_vma, file, pgoff+pglen)) {
70534 - if (prev && addr < prev->vm_end) /* case 4 */
70535 + if (prev && addr < prev->vm_end) { /* case 4 */
70536 err = vma_adjust(prev, prev->vm_start,
70537 addr, prev->vm_pgoff, NULL);
70538 - else /* cases 3, 8 */
70539 +
70540 +#ifdef CONFIG_PAX_SEGMEXEC
70541 + if (!err && prev_m)
70542 + err = vma_adjust(prev_m, prev_m->vm_start,
70543 + addr_m, prev_m->vm_pgoff, NULL);
70544 +#endif
70545 +
70546 + } else { /* cases 3, 8 */
70547 err = vma_adjust(area, addr, next->vm_end,
70548 next->vm_pgoff - pglen, NULL);
70549 +
70550 +#ifdef CONFIG_PAX_SEGMEXEC
70551 + if (!err && area_m)
70552 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
70553 + next_m->vm_pgoff - pglen, NULL);
70554 +#endif
70555 +
70556 + }
70557 if (err)
70558 return NULL;
70559 khugepaged_enter_vma_merge(area);
70560 @@ -921,14 +1001,11 @@ none:
70561 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70562 struct file *file, long pages)
70563 {
70564 - const unsigned long stack_flags
70565 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70566 -
70567 if (file) {
70568 mm->shared_vm += pages;
70569 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70570 mm->exec_vm += pages;
70571 - } else if (flags & stack_flags)
70572 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70573 mm->stack_vm += pages;
70574 if (flags & (VM_RESERVED|VM_IO))
70575 mm->reserved_vm += pages;
70576 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70577 * (the exception is when the underlying filesystem is noexec
70578 * mounted, in which case we dont add PROT_EXEC.)
70579 */
70580 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70581 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70582 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70583 prot |= PROT_EXEC;
70584
70585 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70586 /* Obtain the address to map to. we verify (or select) it and ensure
70587 * that it represents a valid section of the address space.
70588 */
70589 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
70590 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70591 if (addr & ~PAGE_MASK)
70592 return addr;
70593
70594 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70595 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70596 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70597
70598 +#ifdef CONFIG_PAX_MPROTECT
70599 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70600 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70601 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70602 + gr_log_rwxmmap(file);
70603 +
70604 +#ifdef CONFIG_PAX_EMUPLT
70605 + vm_flags &= ~VM_EXEC;
70606 +#else
70607 + return -EPERM;
70608 +#endif
70609 +
70610 + }
70611 +
70612 + if (!(vm_flags & VM_EXEC))
70613 + vm_flags &= ~VM_MAYEXEC;
70614 +#else
70615 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70616 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70617 +#endif
70618 + else
70619 + vm_flags &= ~VM_MAYWRITE;
70620 + }
70621 +#endif
70622 +
70623 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70624 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70625 + vm_flags &= ~VM_PAGEEXEC;
70626 +#endif
70627 +
70628 if (flags & MAP_LOCKED)
70629 if (!can_do_mlock())
70630 return -EPERM;
70631 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70632 locked += mm->locked_vm;
70633 lock_limit = rlimit(RLIMIT_MEMLOCK);
70634 lock_limit >>= PAGE_SHIFT;
70635 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70636 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70637 return -EAGAIN;
70638 }
70639 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70640 if (error)
70641 return error;
70642
70643 + if (!gr_acl_handle_mmap(file, prot))
70644 + return -EACCES;
70645 +
70646 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70647 }
70648 EXPORT_SYMBOL(do_mmap_pgoff);
70649 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70650 vm_flags_t vm_flags = vma->vm_flags;
70651
70652 /* If it was private or non-writable, the write bit is already clear */
70653 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70654 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70655 return 0;
70656
70657 /* The backer wishes to know when pages are first written to? */
70658 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70659 unsigned long charged = 0;
70660 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70661
70662 +#ifdef CONFIG_PAX_SEGMEXEC
70663 + struct vm_area_struct *vma_m = NULL;
70664 +#endif
70665 +
70666 + /*
70667 + * mm->mmap_sem is required to protect against another thread
70668 + * changing the mappings in case we sleep.
70669 + */
70670 + verify_mm_writelocked(mm);
70671 +
70672 /* Clear old maps */
70673 error = -ENOMEM;
70674 -munmap_back:
70675 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70676 if (vma && vma->vm_start < addr + len) {
70677 if (do_munmap(mm, addr, len))
70678 return -ENOMEM;
70679 - goto munmap_back;
70680 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70681 + BUG_ON(vma && vma->vm_start < addr + len);
70682 }
70683
70684 /* Check against address space limit. */
70685 @@ -1258,6 +1379,16 @@ munmap_back:
70686 goto unacct_error;
70687 }
70688
70689 +#ifdef CONFIG_PAX_SEGMEXEC
70690 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70691 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70692 + if (!vma_m) {
70693 + error = -ENOMEM;
70694 + goto free_vma;
70695 + }
70696 + }
70697 +#endif
70698 +
70699 vma->vm_mm = mm;
70700 vma->vm_start = addr;
70701 vma->vm_end = addr + len;
70702 @@ -1266,8 +1397,9 @@ munmap_back:
70703 vma->vm_pgoff = pgoff;
70704 INIT_LIST_HEAD(&vma->anon_vma_chain);
70705
70706 + error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
70707 +
70708 if (file) {
70709 - error = -EINVAL;
70710 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
70711 goto free_vma;
70712 if (vm_flags & VM_DENYWRITE) {
70713 @@ -1281,6 +1413,19 @@ munmap_back:
70714 error = file->f_op->mmap(file, vma);
70715 if (error)
70716 goto unmap_and_free_vma;
70717 +
70718 +#ifdef CONFIG_PAX_SEGMEXEC
70719 + if (vma_m && (vm_flags & VM_EXECUTABLE))
70720 + added_exe_file_vma(mm);
70721 +#endif
70722 +
70723 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70724 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70725 + vma->vm_flags |= VM_PAGEEXEC;
70726 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70727 + }
70728 +#endif
70729 +
70730 if (vm_flags & VM_EXECUTABLE)
70731 added_exe_file_vma(mm);
70732
70733 @@ -1293,6 +1438,8 @@ munmap_back:
70734 pgoff = vma->vm_pgoff;
70735 vm_flags = vma->vm_flags;
70736 } else if (vm_flags & VM_SHARED) {
70737 + if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
70738 + goto free_vma;
70739 error = shmem_zero_setup(vma);
70740 if (error)
70741 goto free_vma;
70742 @@ -1316,6 +1463,11 @@ munmap_back:
70743 vma_link(mm, vma, prev, rb_link, rb_parent);
70744 file = vma->vm_file;
70745
70746 +#ifdef CONFIG_PAX_SEGMEXEC
70747 + if (vma_m)
70748 + BUG_ON(pax_mirror_vma(vma_m, vma));
70749 +#endif
70750 +
70751 /* Once vma denies write, undo our temporary denial count */
70752 if (correct_wcount)
70753 atomic_inc(&inode->i_writecount);
70754 @@ -1324,6 +1476,7 @@ out:
70755
70756 mm->total_vm += len >> PAGE_SHIFT;
70757 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70758 + track_exec_limit(mm, addr, addr + len, vm_flags);
70759 if (vm_flags & VM_LOCKED) {
70760 if (!mlock_vma_pages_range(vma, addr, addr + len))
70761 mm->locked_vm += (len >> PAGE_SHIFT);
70762 @@ -1341,6 +1494,12 @@ unmap_and_free_vma:
70763 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70764 charged = 0;
70765 free_vma:
70766 +
70767 +#ifdef CONFIG_PAX_SEGMEXEC
70768 + if (vma_m)
70769 + kmem_cache_free(vm_area_cachep, vma_m);
70770 +#endif
70771 +
70772 kmem_cache_free(vm_area_cachep, vma);
70773 unacct_error:
70774 if (charged)
70775 @@ -1348,6 +1507,44 @@ unacct_error:
70776 return error;
70777 }
70778
70779 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
70780 +{
70781 + if (!vma) {
70782 +#ifdef CONFIG_STACK_GROWSUP
70783 + if (addr > sysctl_heap_stack_gap)
70784 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
70785 + else
70786 + vma = find_vma(current->mm, 0);
70787 + if (vma && (vma->vm_flags & VM_GROWSUP))
70788 + return false;
70789 +#endif
70790 + return true;
70791 + }
70792 +
70793 + if (addr + len > vma->vm_start)
70794 + return false;
70795 +
70796 + if (vma->vm_flags & VM_GROWSDOWN)
70797 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
70798 +#ifdef CONFIG_STACK_GROWSUP
70799 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
70800 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
70801 +#endif
70802 +
70803 + return true;
70804 +}
70805 +
70806 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
70807 +{
70808 + if (vma->vm_start < len)
70809 + return -ENOMEM;
70810 + if (!(vma->vm_flags & VM_GROWSDOWN))
70811 + return vma->vm_start - len;
70812 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
70813 + return vma->vm_start - len - sysctl_heap_stack_gap;
70814 + return -ENOMEM;
70815 +}
70816 +
70817 /* Get an address range which is currently unmapped.
70818 * For shmat() with addr=0.
70819 *
70820 @@ -1374,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
70821 if (flags & MAP_FIXED)
70822 return addr;
70823
70824 +#ifdef CONFIG_PAX_RANDMMAP
70825 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70826 +#endif
70827 +
70828 if (addr) {
70829 addr = PAGE_ALIGN(addr);
70830 - vma = find_vma(mm, addr);
70831 - if (TASK_SIZE - len >= addr &&
70832 - (!vma || addr + len <= vma->vm_start))
70833 - return addr;
70834 + if (TASK_SIZE - len >= addr) {
70835 + vma = find_vma(mm, addr);
70836 + if (check_heap_stack_gap(vma, addr, len))
70837 + return addr;
70838 + }
70839 }
70840 if (len > mm->cached_hole_size) {
70841 - start_addr = addr = mm->free_area_cache;
70842 + start_addr = addr = mm->free_area_cache;
70843 } else {
70844 - start_addr = addr = TASK_UNMAPPED_BASE;
70845 - mm->cached_hole_size = 0;
70846 + start_addr = addr = mm->mmap_base;
70847 + mm->cached_hole_size = 0;
70848 }
70849
70850 full_search:
70851 @@ -1396,34 +1598,40 @@ full_search:
70852 * Start a new search - just in case we missed
70853 * some holes.
70854 */
70855 - if (start_addr != TASK_UNMAPPED_BASE) {
70856 - addr = TASK_UNMAPPED_BASE;
70857 - start_addr = addr;
70858 + if (start_addr != mm->mmap_base) {
70859 + start_addr = addr = mm->mmap_base;
70860 mm->cached_hole_size = 0;
70861 goto full_search;
70862 }
70863 return -ENOMEM;
70864 }
70865 - if (!vma || addr + len <= vma->vm_start) {
70866 - /*
70867 - * Remember the place where we stopped the search:
70868 - */
70869 - mm->free_area_cache = addr + len;
70870 - return addr;
70871 - }
70872 + if (check_heap_stack_gap(vma, addr, len))
70873 + break;
70874 if (addr + mm->cached_hole_size < vma->vm_start)
70875 mm->cached_hole_size = vma->vm_start - addr;
70876 addr = vma->vm_end;
70877 }
70878 +
70879 + /*
70880 + * Remember the place where we stopped the search:
70881 + */
70882 + mm->free_area_cache = addr + len;
70883 + return addr;
70884 }
70885 #endif
70886
70887 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
70888 {
70889 +
70890 +#ifdef CONFIG_PAX_SEGMEXEC
70891 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
70892 + return;
70893 +#endif
70894 +
70895 /*
70896 * Is this a new hole at the lowest possible address?
70897 */
70898 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
70899 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
70900 mm->free_area_cache = addr;
70901 mm->cached_hole_size = ~0UL;
70902 }
70903 @@ -1441,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70904 {
70905 struct vm_area_struct *vma;
70906 struct mm_struct *mm = current->mm;
70907 - unsigned long addr = addr0;
70908 + unsigned long base = mm->mmap_base, addr = addr0;
70909
70910 /* requested length too big for entire address space */
70911 if (len > TASK_SIZE)
70912 @@ -1450,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70913 if (flags & MAP_FIXED)
70914 return addr;
70915
70916 +#ifdef CONFIG_PAX_RANDMMAP
70917 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70918 +#endif
70919 +
70920 /* requesting a specific address */
70921 if (addr) {
70922 addr = PAGE_ALIGN(addr);
70923 - vma = find_vma(mm, addr);
70924 - if (TASK_SIZE - len >= addr &&
70925 - (!vma || addr + len <= vma->vm_start))
70926 - return addr;
70927 + if (TASK_SIZE - len >= addr) {
70928 + vma = find_vma(mm, addr);
70929 + if (check_heap_stack_gap(vma, addr, len))
70930 + return addr;
70931 + }
70932 }
70933
70934 /* check if free_area_cache is useful for us */
70935 @@ -1471,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70936 /* make sure it can fit in the remaining address space */
70937 if (addr > len) {
70938 vma = find_vma(mm, addr-len);
70939 - if (!vma || addr <= vma->vm_start)
70940 + if (check_heap_stack_gap(vma, addr - len, len))
70941 /* remember the address as a hint for next time */
70942 return (mm->free_area_cache = addr-len);
70943 }
70944 @@ -1488,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70945 * return with success:
70946 */
70947 vma = find_vma(mm, addr);
70948 - if (!vma || addr+len <= vma->vm_start)
70949 + if (check_heap_stack_gap(vma, addr, len))
70950 /* remember the address as a hint for next time */
70951 return (mm->free_area_cache = addr);
70952
70953 @@ -1497,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
70954 mm->cached_hole_size = vma->vm_start - addr;
70955
70956 /* try just below the current vma->vm_start */
70957 - addr = vma->vm_start-len;
70958 - } while (len < vma->vm_start);
70959 + addr = skip_heap_stack_gap(vma, len);
70960 + } while (!IS_ERR_VALUE(addr));
70961
70962 bottomup:
70963 /*
70964 @@ -1507,13 +1720,21 @@ bottomup:
70965 * can happen with large stack limits and large mmap()
70966 * allocations.
70967 */
70968 + mm->mmap_base = TASK_UNMAPPED_BASE;
70969 +
70970 +#ifdef CONFIG_PAX_RANDMMAP
70971 + if (mm->pax_flags & MF_PAX_RANDMMAP)
70972 + mm->mmap_base += mm->delta_mmap;
70973 +#endif
70974 +
70975 + mm->free_area_cache = mm->mmap_base;
70976 mm->cached_hole_size = ~0UL;
70977 - mm->free_area_cache = TASK_UNMAPPED_BASE;
70978 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
70979 /*
70980 * Restore the topdown base:
70981 */
70982 - mm->free_area_cache = mm->mmap_base;
70983 + mm->mmap_base = base;
70984 + mm->free_area_cache = base;
70985 mm->cached_hole_size = ~0UL;
70986
70987 return addr;
70988 @@ -1522,6 +1743,12 @@ bottomup:
70989
70990 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
70991 {
70992 +
70993 +#ifdef CONFIG_PAX_SEGMEXEC
70994 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
70995 + return;
70996 +#endif
70997 +
70998 /*
70999 * Is this a new hole at the highest possible address?
71000 */
71001 @@ -1529,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71002 mm->free_area_cache = addr;
71003
71004 /* dont allow allocations above current base */
71005 - if (mm->free_area_cache > mm->mmap_base)
71006 + if (mm->free_area_cache > mm->mmap_base) {
71007 mm->free_area_cache = mm->mmap_base;
71008 + mm->cached_hole_size = ~0UL;
71009 + }
71010 }
71011
71012 unsigned long
71013 @@ -1603,40 +1832,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
71014
71015 EXPORT_SYMBOL(find_vma);
71016
71017 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
71018 +/*
71019 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
71020 + */
71021 struct vm_area_struct *
71022 find_vma_prev(struct mm_struct *mm, unsigned long addr,
71023 struct vm_area_struct **pprev)
71024 {
71025 - struct vm_area_struct *vma = NULL, *prev = NULL;
71026 - struct rb_node *rb_node;
71027 - if (!mm)
71028 - goto out;
71029 -
71030 - /* Guard against addr being lower than the first VMA */
71031 - vma = mm->mmap;
71032 -
71033 - /* Go through the RB tree quickly. */
71034 - rb_node = mm->mm_rb.rb_node;
71035 -
71036 - while (rb_node) {
71037 - struct vm_area_struct *vma_tmp;
71038 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
71039 -
71040 - if (addr < vma_tmp->vm_end) {
71041 - rb_node = rb_node->rb_left;
71042 - } else {
71043 - prev = vma_tmp;
71044 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
71045 - break;
71046 + struct vm_area_struct *vma;
71047 +
71048 + vma = find_vma(mm, addr);
71049 + if (vma) {
71050 + *pprev = vma->vm_prev;
71051 + } else {
71052 + struct rb_node *rb_node = mm->mm_rb.rb_node;
71053 + *pprev = NULL;
71054 + while (rb_node) {
71055 + *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
71056 rb_node = rb_node->rb_right;
71057 }
71058 }
71059 + return vma;
71060 +}
71061 +
71062 +#ifdef CONFIG_PAX_SEGMEXEC
71063 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71064 +{
71065 + struct vm_area_struct *vma_m;
71066
71067 -out:
71068 - *pprev = prev;
71069 - return prev ? prev->vm_next : vma;
71070 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71071 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71072 + BUG_ON(vma->vm_mirror);
71073 + return NULL;
71074 + }
71075 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71076 + vma_m = vma->vm_mirror;
71077 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71078 + BUG_ON(vma->vm_file != vma_m->vm_file);
71079 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71080 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71081 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71082 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71083 + return vma_m;
71084 }
71085 +#endif
71086
71087 /*
71088 * Verify that the stack growth is acceptable and
71089 @@ -1654,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71090 return -ENOMEM;
71091
71092 /* Stack limit test */
71093 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71094 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71095 return -ENOMEM;
71096
71097 @@ -1664,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71098 locked = mm->locked_vm + grow;
71099 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71100 limit >>= PAGE_SHIFT;
71101 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71102 if (locked > limit && !capable(CAP_IPC_LOCK))
71103 return -ENOMEM;
71104 }
71105 @@ -1694,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71106 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71107 * vma is the last one with address > vma->vm_end. Have to extend vma.
71108 */
71109 +#ifndef CONFIG_IA64
71110 +static
71111 +#endif
71112 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71113 {
71114 int error;
71115 + bool locknext;
71116
71117 if (!(vma->vm_flags & VM_GROWSUP))
71118 return -EFAULT;
71119
71120 + /* Also guard against wrapping around to address 0. */
71121 + if (address < PAGE_ALIGN(address+1))
71122 + address = PAGE_ALIGN(address+1);
71123 + else
71124 + return -ENOMEM;
71125 +
71126 /*
71127 * We must make sure the anon_vma is allocated
71128 * so that the anon_vma locking is not a noop.
71129 */
71130 if (unlikely(anon_vma_prepare(vma)))
71131 return -ENOMEM;
71132 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71133 + if (locknext && anon_vma_prepare(vma->vm_next))
71134 + return -ENOMEM;
71135 vma_lock_anon_vma(vma);
71136 + if (locknext)
71137 + vma_lock_anon_vma(vma->vm_next);
71138
71139 /*
71140 * vma->vm_start/vm_end cannot change under us because the caller
71141 * is required to hold the mmap_sem in read mode. We need the
71142 - * anon_vma lock to serialize against concurrent expand_stacks.
71143 - * Also guard against wrapping around to address 0.
71144 + * anon_vma locks to serialize against concurrent expand_stacks
71145 + * and expand_upwards.
71146 */
71147 - if (address < PAGE_ALIGN(address+4))
71148 - address = PAGE_ALIGN(address+4);
71149 - else {
71150 - vma_unlock_anon_vma(vma);
71151 - return -ENOMEM;
71152 - }
71153 error = 0;
71154
71155 /* Somebody else might have raced and expanded it already */
71156 - if (address > vma->vm_end) {
71157 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71158 + error = -ENOMEM;
71159 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71160 unsigned long size, grow;
71161
71162 size = address - vma->vm_start;
71163 @@ -1739,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71164 }
71165 }
71166 }
71167 + if (locknext)
71168 + vma_unlock_anon_vma(vma->vm_next);
71169 vma_unlock_anon_vma(vma);
71170 khugepaged_enter_vma_merge(vma);
71171 return error;
71172 @@ -1752,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
71173 unsigned long address)
71174 {
71175 int error;
71176 + bool lockprev = false;
71177 + struct vm_area_struct *prev;
71178
71179 /*
71180 * We must make sure the anon_vma is allocated
71181 @@ -1765,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
71182 if (error)
71183 return error;
71184
71185 + prev = vma->vm_prev;
71186 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71187 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71188 +#endif
71189 + if (lockprev && anon_vma_prepare(prev))
71190 + return -ENOMEM;
71191 + if (lockprev)
71192 + vma_lock_anon_vma(prev);
71193 +
71194 vma_lock_anon_vma(vma);
71195
71196 /*
71197 @@ -1774,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
71198 */
71199
71200 /* Somebody else might have raced and expanded it already */
71201 - if (address < vma->vm_start) {
71202 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71203 + error = -ENOMEM;
71204 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71205 unsigned long size, grow;
71206
71207 +#ifdef CONFIG_PAX_SEGMEXEC
71208 + struct vm_area_struct *vma_m;
71209 +
71210 + vma_m = pax_find_mirror_vma(vma);
71211 +#endif
71212 +
71213 size = vma->vm_end - address;
71214 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71215
71216 @@ -1786,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
71217 if (!error) {
71218 vma->vm_start = address;
71219 vma->vm_pgoff -= grow;
71220 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71221 +
71222 +#ifdef CONFIG_PAX_SEGMEXEC
71223 + if (vma_m) {
71224 + vma_m->vm_start -= grow << PAGE_SHIFT;
71225 + vma_m->vm_pgoff -= grow;
71226 + }
71227 +#endif
71228 +
71229 perf_event_mmap(vma);
71230 }
71231 }
71232 }
71233 vma_unlock_anon_vma(vma);
71234 + if (lockprev)
71235 + vma_unlock_anon_vma(prev);
71236 khugepaged_enter_vma_merge(vma);
71237 return error;
71238 }
71239 @@ -1860,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71240 do {
71241 long nrpages = vma_pages(vma);
71242
71243 +#ifdef CONFIG_PAX_SEGMEXEC
71244 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71245 + vma = remove_vma(vma);
71246 + continue;
71247 + }
71248 +#endif
71249 +
71250 mm->total_vm -= nrpages;
71251 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71252 vma = remove_vma(vma);
71253 @@ -1905,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71254 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71255 vma->vm_prev = NULL;
71256 do {
71257 +
71258 +#ifdef CONFIG_PAX_SEGMEXEC
71259 + if (vma->vm_mirror) {
71260 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71261 + vma->vm_mirror->vm_mirror = NULL;
71262 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
71263 + vma->vm_mirror = NULL;
71264 + }
71265 +#endif
71266 +
71267 rb_erase(&vma->vm_rb, &mm->mm_rb);
71268 mm->map_count--;
71269 tail_vma = vma;
71270 @@ -1933,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71271 struct vm_area_struct *new;
71272 int err = -ENOMEM;
71273
71274 +#ifdef CONFIG_PAX_SEGMEXEC
71275 + struct vm_area_struct *vma_m, *new_m = NULL;
71276 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71277 +#endif
71278 +
71279 if (is_vm_hugetlb_page(vma) && (addr &
71280 ~(huge_page_mask(hstate_vma(vma)))))
71281 return -EINVAL;
71282
71283 +#ifdef CONFIG_PAX_SEGMEXEC
71284 + vma_m = pax_find_mirror_vma(vma);
71285 +#endif
71286 +
71287 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71288 if (!new)
71289 goto out_err;
71290
71291 +#ifdef CONFIG_PAX_SEGMEXEC
71292 + if (vma_m) {
71293 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71294 + if (!new_m) {
71295 + kmem_cache_free(vm_area_cachep, new);
71296 + goto out_err;
71297 + }
71298 + }
71299 +#endif
71300 +
71301 /* most fields are the same, copy all, and then fixup */
71302 *new = *vma;
71303
71304 @@ -1953,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71305 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71306 }
71307
71308 +#ifdef CONFIG_PAX_SEGMEXEC
71309 + if (vma_m) {
71310 + *new_m = *vma_m;
71311 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71312 + new_m->vm_mirror = new;
71313 + new->vm_mirror = new_m;
71314 +
71315 + if (new_below)
71316 + new_m->vm_end = addr_m;
71317 + else {
71318 + new_m->vm_start = addr_m;
71319 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71320 + }
71321 + }
71322 +#endif
71323 +
71324 pol = mpol_dup(vma_policy(vma));
71325 if (IS_ERR(pol)) {
71326 err = PTR_ERR(pol);
71327 @@ -1978,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71328 else
71329 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71330
71331 +#ifdef CONFIG_PAX_SEGMEXEC
71332 + if (!err && vma_m) {
71333 + if (anon_vma_clone(new_m, vma_m))
71334 + goto out_free_mpol;
71335 +
71336 + mpol_get(pol);
71337 + vma_set_policy(new_m, pol);
71338 +
71339 + if (new_m->vm_file) {
71340 + get_file(new_m->vm_file);
71341 + if (vma_m->vm_flags & VM_EXECUTABLE)
71342 + added_exe_file_vma(mm);
71343 + }
71344 +
71345 + if (new_m->vm_ops && new_m->vm_ops->open)
71346 + new_m->vm_ops->open(new_m);
71347 +
71348 + if (new_below)
71349 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71350 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71351 + else
71352 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71353 +
71354 + if (err) {
71355 + if (new_m->vm_ops && new_m->vm_ops->close)
71356 + new_m->vm_ops->close(new_m);
71357 + if (new_m->vm_file) {
71358 + if (vma_m->vm_flags & VM_EXECUTABLE)
71359 + removed_exe_file_vma(mm);
71360 + fput(new_m->vm_file);
71361 + }
71362 + mpol_put(pol);
71363 + }
71364 + }
71365 +#endif
71366 +
71367 /* Success. */
71368 if (!err)
71369 return 0;
71370 @@ -1990,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71371 removed_exe_file_vma(mm);
71372 fput(new->vm_file);
71373 }
71374 - unlink_anon_vmas(new);
71375 out_free_mpol:
71376 mpol_put(pol);
71377 out_free_vma:
71378 +
71379 +#ifdef CONFIG_PAX_SEGMEXEC
71380 + if (new_m) {
71381 + unlink_anon_vmas(new_m);
71382 + kmem_cache_free(vm_area_cachep, new_m);
71383 + }
71384 +#endif
71385 +
71386 + unlink_anon_vmas(new);
71387 kmem_cache_free(vm_area_cachep, new);
71388 out_err:
71389 return err;
71390 @@ -2006,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71391 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71392 unsigned long addr, int new_below)
71393 {
71394 +
71395 +#ifdef CONFIG_PAX_SEGMEXEC
71396 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71397 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71398 + if (mm->map_count >= sysctl_max_map_count-1)
71399 + return -ENOMEM;
71400 + } else
71401 +#endif
71402 +
71403 if (mm->map_count >= sysctl_max_map_count)
71404 return -ENOMEM;
71405
71406 @@ -2017,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71407 * work. This now handles partial unmappings.
71408 * Jeremy Fitzhardinge <jeremy@goop.org>
71409 */
71410 +#ifdef CONFIG_PAX_SEGMEXEC
71411 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71412 {
71413 + int ret = __do_munmap(mm, start, len);
71414 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71415 + return ret;
71416 +
71417 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71418 +}
71419 +
71420 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71421 +#else
71422 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71423 +#endif
71424 +{
71425 unsigned long end;
71426 struct vm_area_struct *vma, *prev, *last;
71427
71428 + /*
71429 + * mm->mmap_sem is required to protect against another thread
71430 + * changing the mappings in case we sleep.
71431 + */
71432 + verify_mm_writelocked(mm);
71433 +
71434 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71435 return -EINVAL;
71436
71437 @@ -2096,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71438 /* Fix up all other VM information */
71439 remove_vma_list(mm, vma);
71440
71441 + track_exec_limit(mm, start, end, 0UL);
71442 +
71443 return 0;
71444 }
71445
71446 @@ -2108,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71447
71448 profile_munmap(addr);
71449
71450 +#ifdef CONFIG_PAX_SEGMEXEC
71451 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71452 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
71453 + return -EINVAL;
71454 +#endif
71455 +
71456 down_write(&mm->mmap_sem);
71457 ret = do_munmap(mm, addr, len);
71458 up_write(&mm->mmap_sem);
71459 return ret;
71460 }
71461
71462 -static inline void verify_mm_writelocked(struct mm_struct *mm)
71463 -{
71464 -#ifdef CONFIG_DEBUG_VM
71465 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71466 - WARN_ON(1);
71467 - up_read(&mm->mmap_sem);
71468 - }
71469 -#endif
71470 -}
71471 -
71472 /*
71473 * this is really a simplified "do_mmap". it only handles
71474 * anonymous maps. eventually we may be able to do some
71475 @@ -2137,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71476 struct rb_node ** rb_link, * rb_parent;
71477 pgoff_t pgoff = addr >> PAGE_SHIFT;
71478 int error;
71479 + unsigned long charged;
71480
71481 len = PAGE_ALIGN(len);
71482 if (!len)
71483 @@ -2148,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71484
71485 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71486
71487 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71488 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71489 + flags &= ~VM_EXEC;
71490 +
71491 +#ifdef CONFIG_PAX_MPROTECT
71492 + if (mm->pax_flags & MF_PAX_MPROTECT)
71493 + flags &= ~VM_MAYEXEC;
71494 +#endif
71495 +
71496 + }
71497 +#endif
71498 +
71499 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71500 if (error & ~PAGE_MASK)
71501 return error;
71502
71503 + charged = len >> PAGE_SHIFT;
71504 +
71505 /*
71506 * mlock MCL_FUTURE?
71507 */
71508 if (mm->def_flags & VM_LOCKED) {
71509 unsigned long locked, lock_limit;
71510 - locked = len >> PAGE_SHIFT;
71511 + locked = charged;
71512 locked += mm->locked_vm;
71513 lock_limit = rlimit(RLIMIT_MEMLOCK);
71514 lock_limit >>= PAGE_SHIFT;
71515 @@ -2174,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71516 /*
71517 * Clear old maps. this also does some error checking for us
71518 */
71519 - munmap_back:
71520 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71521 if (vma && vma->vm_start < addr + len) {
71522 if (do_munmap(mm, addr, len))
71523 return -ENOMEM;
71524 - goto munmap_back;
71525 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71526 + BUG_ON(vma && vma->vm_start < addr + len);
71527 }
71528
71529 /* Check against address space limits *after* clearing old maps... */
71530 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71531 + if (!may_expand_vm(mm, charged))
71532 return -ENOMEM;
71533
71534 if (mm->map_count > sysctl_max_map_count)
71535 return -ENOMEM;
71536
71537 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
71538 + if (security_vm_enough_memory(charged))
71539 return -ENOMEM;
71540
71541 /* Can we just expand an old private anonymous mapping? */
71542 @@ -2203,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71543 */
71544 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71545 if (!vma) {
71546 - vm_unacct_memory(len >> PAGE_SHIFT);
71547 + vm_unacct_memory(charged);
71548 return -ENOMEM;
71549 }
71550
71551 @@ -2217,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71552 vma_link(mm, vma, prev, rb_link, rb_parent);
71553 out:
71554 perf_event_mmap(vma);
71555 - mm->total_vm += len >> PAGE_SHIFT;
71556 + mm->total_vm += charged;
71557 if (flags & VM_LOCKED) {
71558 if (!mlock_vma_pages_range(vma, addr, addr + len))
71559 - mm->locked_vm += (len >> PAGE_SHIFT);
71560 + mm->locked_vm += charged;
71561 }
71562 + track_exec_limit(mm, addr, addr + len, flags);
71563 return addr;
71564 }
71565
71566 @@ -2268,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
71567 * Walk the list again, actually closing and freeing it,
71568 * with preemption enabled, without holding any MM locks.
71569 */
71570 - while (vma)
71571 + while (vma) {
71572 + vma->vm_mirror = NULL;
71573 vma = remove_vma(vma);
71574 + }
71575
71576 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71577 }
71578 @@ -2283,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71579 struct vm_area_struct * __vma, * prev;
71580 struct rb_node ** rb_link, * rb_parent;
71581
71582 +#ifdef CONFIG_PAX_SEGMEXEC
71583 + struct vm_area_struct *vma_m = NULL;
71584 +#endif
71585 +
71586 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71587 + return -EPERM;
71588 +
71589 /*
71590 * The vm_pgoff of a purely anonymous vma should be irrelevant
71591 * until its first write fault, when page's anon_vma and index
71592 @@ -2305,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71593 if ((vma->vm_flags & VM_ACCOUNT) &&
71594 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71595 return -ENOMEM;
71596 +
71597 +#ifdef CONFIG_PAX_SEGMEXEC
71598 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71599 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71600 + if (!vma_m)
71601 + return -ENOMEM;
71602 + }
71603 +#endif
71604 +
71605 vma_link(mm, vma, prev, rb_link, rb_parent);
71606 +
71607 +#ifdef CONFIG_PAX_SEGMEXEC
71608 + if (vma_m)
71609 + BUG_ON(pax_mirror_vma(vma_m, vma));
71610 +#endif
71611 +
71612 return 0;
71613 }
71614
71615 @@ -2323,6 +2769,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71616 struct rb_node **rb_link, *rb_parent;
71617 struct mempolicy *pol;
71618
71619 + BUG_ON(vma->vm_mirror);
71620 +
71621 /*
71622 * If anonymous vma has not yet been faulted, update new pgoff
71623 * to match new location, to increase its chance of merging.
71624 @@ -2373,6 +2821,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71625 return NULL;
71626 }
71627
71628 +#ifdef CONFIG_PAX_SEGMEXEC
71629 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71630 +{
71631 + struct vm_area_struct *prev_m;
71632 + struct rb_node **rb_link_m, *rb_parent_m;
71633 + struct mempolicy *pol_m;
71634 +
71635 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71636 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71637 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71638 + *vma_m = *vma;
71639 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71640 + if (anon_vma_clone(vma_m, vma))
71641 + return -ENOMEM;
71642 + pol_m = vma_policy(vma_m);
71643 + mpol_get(pol_m);
71644 + vma_set_policy(vma_m, pol_m);
71645 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71646 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71647 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71648 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71649 + if (vma_m->vm_file)
71650 + get_file(vma_m->vm_file);
71651 + if (vma_m->vm_ops && vma_m->vm_ops->open)
71652 + vma_m->vm_ops->open(vma_m);
71653 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71654 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71655 + vma_m->vm_mirror = vma;
71656 + vma->vm_mirror = vma_m;
71657 + return 0;
71658 +}
71659 +#endif
71660 +
71661 /*
71662 * Return true if the calling process may expand its vm space by the passed
71663 * number of pages
71664 @@ -2383,7 +2864,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71665 unsigned long lim;
71666
71667 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71668 -
71669 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71670 if (cur + npages > lim)
71671 return 0;
71672 return 1;
71673 @@ -2454,6 +2935,22 @@ int install_special_mapping(struct mm_struct *mm,
71674 vma->vm_start = addr;
71675 vma->vm_end = addr + len;
71676
71677 +#ifdef CONFIG_PAX_MPROTECT
71678 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71679 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71680 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71681 + return -EPERM;
71682 + if (!(vm_flags & VM_EXEC))
71683 + vm_flags &= ~VM_MAYEXEC;
71684 +#else
71685 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71686 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71687 +#endif
71688 + else
71689 + vm_flags &= ~VM_MAYWRITE;
71690 + }
71691 +#endif
71692 +
71693 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71694 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71695
71696 diff --git a/mm/mprotect.c b/mm/mprotect.c
71697 index 5a688a2..27e031c 100644
71698 --- a/mm/mprotect.c
71699 +++ b/mm/mprotect.c
71700 @@ -23,10 +23,16 @@
71701 #include <linux/mmu_notifier.h>
71702 #include <linux/migrate.h>
71703 #include <linux/perf_event.h>
71704 +
71705 +#ifdef CONFIG_PAX_MPROTECT
71706 +#include <linux/elf.h>
71707 +#endif
71708 +
71709 #include <asm/uaccess.h>
71710 #include <asm/pgtable.h>
71711 #include <asm/cacheflush.h>
71712 #include <asm/tlbflush.h>
71713 +#include <asm/mmu_context.h>
71714
71715 #ifndef pgprot_modify
71716 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71717 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
71718 flush_tlb_range(vma, start, end);
71719 }
71720
71721 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71722 +/* called while holding the mmap semaphor for writing except stack expansion */
71723 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71724 +{
71725 + unsigned long oldlimit, newlimit = 0UL;
71726 +
71727 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71728 + return;
71729 +
71730 + spin_lock(&mm->page_table_lock);
71731 + oldlimit = mm->context.user_cs_limit;
71732 + if ((prot & VM_EXEC) && oldlimit < end)
71733 + /* USER_CS limit moved up */
71734 + newlimit = end;
71735 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71736 + /* USER_CS limit moved down */
71737 + newlimit = start;
71738 +
71739 + if (newlimit) {
71740 + mm->context.user_cs_limit = newlimit;
71741 +
71742 +#ifdef CONFIG_SMP
71743 + wmb();
71744 + cpus_clear(mm->context.cpu_user_cs_mask);
71745 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71746 +#endif
71747 +
71748 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71749 + }
71750 + spin_unlock(&mm->page_table_lock);
71751 + if (newlimit == end) {
71752 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
71753 +
71754 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
71755 + if (is_vm_hugetlb_page(vma))
71756 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71757 + else
71758 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71759 + }
71760 +}
71761 +#endif
71762 +
71763 int
71764 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71765 unsigned long start, unsigned long end, unsigned long newflags)
71766 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71767 int error;
71768 int dirty_accountable = 0;
71769
71770 +#ifdef CONFIG_PAX_SEGMEXEC
71771 + struct vm_area_struct *vma_m = NULL;
71772 + unsigned long start_m, end_m;
71773 +
71774 + start_m = start + SEGMEXEC_TASK_SIZE;
71775 + end_m = end + SEGMEXEC_TASK_SIZE;
71776 +#endif
71777 +
71778 if (newflags == oldflags) {
71779 *pprev = vma;
71780 return 0;
71781 }
71782
71783 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71784 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71785 +
71786 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71787 + return -ENOMEM;
71788 +
71789 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71790 + return -ENOMEM;
71791 + }
71792 +
71793 /*
71794 * If we make a private mapping writable we increase our commit;
71795 * but (without finer accounting) cannot reduce our commit if we
71796 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71797 }
71798 }
71799
71800 +#ifdef CONFIG_PAX_SEGMEXEC
71801 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71802 + if (start != vma->vm_start) {
71803 + error = split_vma(mm, vma, start, 1);
71804 + if (error)
71805 + goto fail;
71806 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71807 + *pprev = (*pprev)->vm_next;
71808 + }
71809 +
71810 + if (end != vma->vm_end) {
71811 + error = split_vma(mm, vma, end, 0);
71812 + if (error)
71813 + goto fail;
71814 + }
71815 +
71816 + if (pax_find_mirror_vma(vma)) {
71817 + error = __do_munmap(mm, start_m, end_m - start_m);
71818 + if (error)
71819 + goto fail;
71820 + } else {
71821 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71822 + if (!vma_m) {
71823 + error = -ENOMEM;
71824 + goto fail;
71825 + }
71826 + vma->vm_flags = newflags;
71827 + error = pax_mirror_vma(vma_m, vma);
71828 + if (error) {
71829 + vma->vm_flags = oldflags;
71830 + goto fail;
71831 + }
71832 + }
71833 + }
71834 +#endif
71835 +
71836 /*
71837 * First try to merge with previous and/or next vma.
71838 */
71839 @@ -204,9 +306,21 @@ success:
71840 * vm_flags and vm_page_prot are protected by the mmap_sem
71841 * held in write mode.
71842 */
71843 +
71844 +#ifdef CONFIG_PAX_SEGMEXEC
71845 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
71846 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
71847 +#endif
71848 +
71849 vma->vm_flags = newflags;
71850 +
71851 +#ifdef CONFIG_PAX_MPROTECT
71852 + if (mm->binfmt && mm->binfmt->handle_mprotect)
71853 + mm->binfmt->handle_mprotect(vma, newflags);
71854 +#endif
71855 +
71856 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
71857 - vm_get_page_prot(newflags));
71858 + vm_get_page_prot(vma->vm_flags));
71859
71860 if (vma_wants_writenotify(vma)) {
71861 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
71862 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71863 end = start + len;
71864 if (end <= start)
71865 return -ENOMEM;
71866 +
71867 +#ifdef CONFIG_PAX_SEGMEXEC
71868 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71869 + if (end > SEGMEXEC_TASK_SIZE)
71870 + return -EINVAL;
71871 + } else
71872 +#endif
71873 +
71874 + if (end > TASK_SIZE)
71875 + return -EINVAL;
71876 +
71877 if (!arch_validate_prot(prot))
71878 return -EINVAL;
71879
71880 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71881 /*
71882 * Does the application expect PROT_READ to imply PROT_EXEC:
71883 */
71884 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71885 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71886 prot |= PROT_EXEC;
71887
71888 vm_flags = calc_vm_prot_bits(prot);
71889 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71890 if (start > vma->vm_start)
71891 prev = vma;
71892
71893 +#ifdef CONFIG_PAX_MPROTECT
71894 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
71895 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
71896 +#endif
71897 +
71898 for (nstart = start ; ; ) {
71899 unsigned long newflags;
71900
71901 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71902
71903 /* newflags >> 4 shift VM_MAY% in place of VM_% */
71904 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
71905 + if (prot & (PROT_WRITE | PROT_EXEC))
71906 + gr_log_rwxmprotect(vma->vm_file);
71907 +
71908 + error = -EACCES;
71909 + goto out;
71910 + }
71911 +
71912 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
71913 error = -EACCES;
71914 goto out;
71915 }
71916 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71917 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
71918 if (error)
71919 goto out;
71920 +
71921 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
71922 +
71923 nstart = tmp;
71924
71925 if (nstart < prev->vm_end)
71926 diff --git a/mm/mremap.c b/mm/mremap.c
71927 index d6959cb..18a402a 100644
71928 --- a/mm/mremap.c
71929 +++ b/mm/mremap.c
71930 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
71931 continue;
71932 pte = ptep_get_and_clear(mm, old_addr, old_pte);
71933 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
71934 +
71935 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71936 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
71937 + pte = pte_exprotect(pte);
71938 +#endif
71939 +
71940 set_pte_at(mm, new_addr, new_pte, pte);
71941 }
71942
71943 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
71944 if (is_vm_hugetlb_page(vma))
71945 goto Einval;
71946
71947 +#ifdef CONFIG_PAX_SEGMEXEC
71948 + if (pax_find_mirror_vma(vma))
71949 + goto Einval;
71950 +#endif
71951 +
71952 /* We can't remap across vm area boundaries */
71953 if (old_len > vma->vm_end - addr)
71954 goto Efault;
71955 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
71956 unsigned long ret = -EINVAL;
71957 unsigned long charged = 0;
71958 unsigned long map_flags;
71959 + unsigned long pax_task_size = TASK_SIZE;
71960
71961 if (new_addr & ~PAGE_MASK)
71962 goto out;
71963
71964 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
71965 +#ifdef CONFIG_PAX_SEGMEXEC
71966 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
71967 + pax_task_size = SEGMEXEC_TASK_SIZE;
71968 +#endif
71969 +
71970 + pax_task_size -= PAGE_SIZE;
71971 +
71972 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
71973 goto out;
71974
71975 /* Check if the location we're moving into overlaps the
71976 * old location at all, and fail if it does.
71977 */
71978 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
71979 - goto out;
71980 -
71981 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
71982 + if (addr + old_len > new_addr && new_addr + new_len > addr)
71983 goto out;
71984
71985 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
71986 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
71987 struct vm_area_struct *vma;
71988 unsigned long ret = -EINVAL;
71989 unsigned long charged = 0;
71990 + unsigned long pax_task_size = TASK_SIZE;
71991
71992 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
71993 goto out;
71994 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
71995 if (!new_len)
71996 goto out;
71997
71998 +#ifdef CONFIG_PAX_SEGMEXEC
71999 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72000 + pax_task_size = SEGMEXEC_TASK_SIZE;
72001 +#endif
72002 +
72003 + pax_task_size -= PAGE_SIZE;
72004 +
72005 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72006 + old_len > pax_task_size || addr > pax_task_size-old_len)
72007 + goto out;
72008 +
72009 if (flags & MREMAP_FIXED) {
72010 if (flags & MREMAP_MAYMOVE)
72011 ret = mremap_to(addr, old_len, new_addr, new_len);
72012 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
72013 addr + new_len);
72014 }
72015 ret = addr;
72016 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72017 goto out;
72018 }
72019 }
72020 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
72021 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72022 if (ret)
72023 goto out;
72024 +
72025 + map_flags = vma->vm_flags;
72026 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72027 + if (!(ret & ~PAGE_MASK)) {
72028 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72029 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72030 + }
72031 }
72032 out:
72033 if (ret & ~PAGE_MASK)
72034 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
72035 index 7fa41b4..6087460 100644
72036 --- a/mm/nobootmem.c
72037 +++ b/mm/nobootmem.c
72038 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
72039 unsigned long __init free_all_memory_core_early(int nodeid)
72040 {
72041 int i;
72042 - u64 start, end;
72043 + u64 start, end, startrange, endrange;
72044 unsigned long count = 0;
72045 - struct range *range = NULL;
72046 + struct range *range = NULL, rangerange = { 0, 0 };
72047 int nr_range;
72048
72049 nr_range = get_free_all_memory_range(&range, nodeid);
72050 + startrange = __pa(range) >> PAGE_SHIFT;
72051 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
72052
72053 for (i = 0; i < nr_range; i++) {
72054 start = range[i].start;
72055 end = range[i].end;
72056 + if (start <= endrange && startrange < end) {
72057 + BUG_ON(rangerange.start | rangerange.end);
72058 + rangerange = range[i];
72059 + continue;
72060 + }
72061 count += end - start;
72062 __free_pages_memory(start, end);
72063 }
72064 + start = rangerange.start;
72065 + end = rangerange.end;
72066 + count += end - start;
72067 + __free_pages_memory(start, end);
72068
72069 return count;
72070 }
72071 diff --git a/mm/nommu.c b/mm/nommu.c
72072 index f59e170..34e2a2b 100644
72073 --- a/mm/nommu.c
72074 +++ b/mm/nommu.c
72075 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72076 int sysctl_overcommit_ratio = 50; /* default is 50% */
72077 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72078 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72079 -int heap_stack_gap = 0;
72080
72081 atomic_long_t mmap_pages_allocated;
72082
72083 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72084 EXPORT_SYMBOL(find_vma);
72085
72086 /*
72087 - * find a VMA
72088 - * - we don't extend stack VMAs under NOMMU conditions
72089 - */
72090 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72091 -{
72092 - return find_vma(mm, addr);
72093 -}
72094 -
72095 -/*
72096 * expand a stack to a given address
72097 * - not supported under NOMMU conditions
72098 */
72099 @@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72100
72101 /* most fields are the same, copy all, and then fixup */
72102 *new = *vma;
72103 + INIT_LIST_HEAD(&new->anon_vma_chain);
72104 *region = *vma->vm_region;
72105 new->vm_region = region;
72106
72107 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72108 index 485be89..c059ad3 100644
72109 --- a/mm/page_alloc.c
72110 +++ b/mm/page_alloc.c
72111 @@ -341,7 +341,7 @@ out:
72112 * This usage means that zero-order pages may not be compound.
72113 */
72114
72115 -static void free_compound_page(struct page *page)
72116 +void free_compound_page(struct page *page)
72117 {
72118 __free_pages_ok(page, compound_order(page));
72119 }
72120 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72121 int i;
72122 int bad = 0;
72123
72124 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72125 + unsigned long index = 1UL << order;
72126 +#endif
72127 +
72128 trace_mm_page_free_direct(page, order);
72129 kmemcheck_free_shadow(page, order);
72130
72131 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72132 debug_check_no_obj_freed(page_address(page),
72133 PAGE_SIZE << order);
72134 }
72135 +
72136 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72137 + for (; index; --index)
72138 + sanitize_highpage(page + index - 1);
72139 +#endif
72140 +
72141 arch_free_page(page, order);
72142 kernel_map_pages(page, 1 << order, 0);
72143
72144 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72145 arch_alloc_page(page, order);
72146 kernel_map_pages(page, 1 << order, 1);
72147
72148 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72149 if (gfp_flags & __GFP_ZERO)
72150 prep_zero_page(page, order, gfp_flags);
72151 +#endif
72152
72153 if (order && (gfp_flags & __GFP_COMP))
72154 prep_compound_page(page, order);
72155 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72156 unsigned long pfn;
72157
72158 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72159 +#ifdef CONFIG_X86_32
72160 + /* boot failures in VMware 8 on 32bit vanilla since
72161 + this change */
72162 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72163 +#else
72164 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72165 +#endif
72166 return 1;
72167 }
72168 return 0;
72169 diff --git a/mm/percpu.c b/mm/percpu.c
72170 index 716eb4a..8d10419 100644
72171 --- a/mm/percpu.c
72172 +++ b/mm/percpu.c
72173 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72174 static unsigned int pcpu_high_unit_cpu __read_mostly;
72175
72176 /* the address of the first chunk which starts with the kernel static area */
72177 -void *pcpu_base_addr __read_mostly;
72178 +void *pcpu_base_addr __read_only;
72179 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72180
72181 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72182 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72183 index e920aa3..137702a 100644
72184 --- a/mm/process_vm_access.c
72185 +++ b/mm/process_vm_access.c
72186 @@ -13,6 +13,7 @@
72187 #include <linux/uio.h>
72188 #include <linux/sched.h>
72189 #include <linux/highmem.h>
72190 +#include <linux/security.h>
72191 #include <linux/ptrace.h>
72192 #include <linux/slab.h>
72193 #include <linux/syscalls.h>
72194 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72195 size_t iov_l_curr_offset = 0;
72196 ssize_t iov_len;
72197
72198 + return -ENOSYS; // PaX: until properly audited
72199 +
72200 /*
72201 * Work out how many pages of struct pages we're going to need
72202 * when eventually calling get_user_pages
72203 */
72204 for (i = 0; i < riovcnt; i++) {
72205 iov_len = rvec[i].iov_len;
72206 - if (iov_len > 0) {
72207 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
72208 - + iov_len)
72209 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
72210 - / PAGE_SIZE + 1;
72211 - nr_pages = max(nr_pages, nr_pages_iov);
72212 - }
72213 + if (iov_len <= 0)
72214 + continue;
72215 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
72216 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
72217 + nr_pages = max(nr_pages, nr_pages_iov);
72218 }
72219
72220 if (nr_pages == 0)
72221 @@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72222 goto free_proc_pages;
72223 }
72224
72225 - task_lock(task);
72226 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
72227 - task_unlock(task);
72228 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
72229 rc = -EPERM;
72230 goto put_task_struct;
72231 }
72232 - mm = task->mm;
72233
72234 - if (!mm || (task->flags & PF_KTHREAD)) {
72235 - task_unlock(task);
72236 - rc = -EINVAL;
72237 + mm = mm_access(task, PTRACE_MODE_ATTACH);
72238 + if (!mm || IS_ERR(mm)) {
72239 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
72240 + /*
72241 + * Explicitly map EACCES to EPERM as EPERM is a more a
72242 + * appropriate error code for process_vw_readv/writev
72243 + */
72244 + if (rc == -EACCES)
72245 + rc = -EPERM;
72246 goto put_task_struct;
72247 }
72248
72249 - atomic_inc(&mm->mm_users);
72250 - task_unlock(task);
72251 -
72252 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
72253 rc = process_vm_rw_single_vec(
72254 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
72255 diff --git a/mm/rmap.c b/mm/rmap.c
72256 index a4fd368..e0ffec7 100644
72257 --- a/mm/rmap.c
72258 +++ b/mm/rmap.c
72259 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72260 struct anon_vma *anon_vma = vma->anon_vma;
72261 struct anon_vma_chain *avc;
72262
72263 +#ifdef CONFIG_PAX_SEGMEXEC
72264 + struct anon_vma_chain *avc_m = NULL;
72265 +#endif
72266 +
72267 might_sleep();
72268 if (unlikely(!anon_vma)) {
72269 struct mm_struct *mm = vma->vm_mm;
72270 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72271 if (!avc)
72272 goto out_enomem;
72273
72274 +#ifdef CONFIG_PAX_SEGMEXEC
72275 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72276 + if (!avc_m)
72277 + goto out_enomem_free_avc;
72278 +#endif
72279 +
72280 anon_vma = find_mergeable_anon_vma(vma);
72281 allocated = NULL;
72282 if (!anon_vma) {
72283 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72284 /* page_table_lock to protect against threads */
72285 spin_lock(&mm->page_table_lock);
72286 if (likely(!vma->anon_vma)) {
72287 +
72288 +#ifdef CONFIG_PAX_SEGMEXEC
72289 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72290 +
72291 + if (vma_m) {
72292 + BUG_ON(vma_m->anon_vma);
72293 + vma_m->anon_vma = anon_vma;
72294 + avc_m->anon_vma = anon_vma;
72295 + avc_m->vma = vma;
72296 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
72297 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
72298 + avc_m = NULL;
72299 + }
72300 +#endif
72301 +
72302 vma->anon_vma = anon_vma;
72303 avc->anon_vma = anon_vma;
72304 avc->vma = vma;
72305 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72306
72307 if (unlikely(allocated))
72308 put_anon_vma(allocated);
72309 +
72310 +#ifdef CONFIG_PAX_SEGMEXEC
72311 + if (unlikely(avc_m))
72312 + anon_vma_chain_free(avc_m);
72313 +#endif
72314 +
72315 if (unlikely(avc))
72316 anon_vma_chain_free(avc);
72317 }
72318 return 0;
72319
72320 out_enomem_free_avc:
72321 +
72322 +#ifdef CONFIG_PAX_SEGMEXEC
72323 + if (avc_m)
72324 + anon_vma_chain_free(avc_m);
72325 +#endif
72326 +
72327 anon_vma_chain_free(avc);
72328 out_enomem:
72329 return -ENOMEM;
72330 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
72331 * Attach the anon_vmas from src to dst.
72332 * Returns 0 on success, -ENOMEM on failure.
72333 */
72334 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72335 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72336 {
72337 struct anon_vma_chain *avc, *pavc;
72338 struct anon_vma *root = NULL;
72339 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72340 * the corresponding VMA in the parent process is attached to.
72341 * Returns 0 on success, non-zero on failure.
72342 */
72343 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72344 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72345 {
72346 struct anon_vma_chain *avc;
72347 struct anon_vma *anon_vma;
72348 diff --git a/mm/shmem.c b/mm/shmem.c
72349 index 6c253f7..367e20a 100644
72350 --- a/mm/shmem.c
72351 +++ b/mm/shmem.c
72352 @@ -31,7 +31,7 @@
72353 #include <linux/export.h>
72354 #include <linux/swap.h>
72355
72356 -static struct vfsmount *shm_mnt;
72357 +struct vfsmount *shm_mnt;
72358
72359 #ifdef CONFIG_SHMEM
72360 /*
72361 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72362 #define BOGO_DIRENT_SIZE 20
72363
72364 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72365 -#define SHORT_SYMLINK_LEN 128
72366 +#define SHORT_SYMLINK_LEN 64
72367
72368 struct shmem_xattr {
72369 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72370 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72371 int err = -ENOMEM;
72372
72373 /* Round up to L1_CACHE_BYTES to resist false sharing */
72374 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72375 - L1_CACHE_BYTES), GFP_KERNEL);
72376 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72377 if (!sbinfo)
72378 return -ENOMEM;
72379
72380 diff --git a/mm/slab.c b/mm/slab.c
72381 index 83311c9a..fcf8f86 100644
72382 --- a/mm/slab.c
72383 +++ b/mm/slab.c
72384 @@ -151,7 +151,7 @@
72385
72386 /* Legal flag mask for kmem_cache_create(). */
72387 #if DEBUG
72388 -# define CREATE_MASK (SLAB_RED_ZONE | \
72389 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72390 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72391 SLAB_CACHE_DMA | \
72392 SLAB_STORE_USER | \
72393 @@ -159,7 +159,7 @@
72394 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72395 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72396 #else
72397 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72398 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72399 SLAB_CACHE_DMA | \
72400 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72401 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72402 @@ -288,7 +288,7 @@ struct kmem_list3 {
72403 * Need this for bootstrapping a per node allocator.
72404 */
72405 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72406 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72407 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72408 #define CACHE_CACHE 0
72409 #define SIZE_AC MAX_NUMNODES
72410 #define SIZE_L3 (2 * MAX_NUMNODES)
72411 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72412 if ((x)->max_freeable < i) \
72413 (x)->max_freeable = i; \
72414 } while (0)
72415 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72416 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72417 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72418 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72419 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72420 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72421 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72422 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72423 #else
72424 #define STATS_INC_ACTIVE(x) do { } while (0)
72425 #define STATS_DEC_ACTIVE(x) do { } while (0)
72426 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72427 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72428 */
72429 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72430 - const struct slab *slab, void *obj)
72431 + const struct slab *slab, const void *obj)
72432 {
72433 u32 offset = (obj - slab->s_mem);
72434 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72435 @@ -564,7 +564,7 @@ struct cache_names {
72436 static struct cache_names __initdata cache_names[] = {
72437 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72438 #include <linux/kmalloc_sizes.h>
72439 - {NULL,}
72440 + {NULL}
72441 #undef CACHE
72442 };
72443
72444 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
72445 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72446 sizes[INDEX_AC].cs_size,
72447 ARCH_KMALLOC_MINALIGN,
72448 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72449 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72450 NULL);
72451
72452 if (INDEX_AC != INDEX_L3) {
72453 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
72454 kmem_cache_create(names[INDEX_L3].name,
72455 sizes[INDEX_L3].cs_size,
72456 ARCH_KMALLOC_MINALIGN,
72457 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72458 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72459 NULL);
72460 }
72461
72462 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
72463 sizes->cs_cachep = kmem_cache_create(names->name,
72464 sizes->cs_size,
72465 ARCH_KMALLOC_MINALIGN,
72466 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72467 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72468 NULL);
72469 }
72470 #ifdef CONFIG_ZONE_DMA
72471 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
72472 }
72473 /* cpu stats */
72474 {
72475 - unsigned long allochit = atomic_read(&cachep->allochit);
72476 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72477 - unsigned long freehit = atomic_read(&cachep->freehit);
72478 - unsigned long freemiss = atomic_read(&cachep->freemiss);
72479 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72480 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72481 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72482 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72483
72484 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72485 allochit, allocmiss, freehit, freemiss);
72486 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
72487 {
72488 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72489 #ifdef CONFIG_DEBUG_SLAB_LEAK
72490 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72491 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72492 #endif
72493 return 0;
72494 }
72495 module_init(slab_proc_init);
72496 #endif
72497
72498 +void check_object_size(const void *ptr, unsigned long n, bool to)
72499 +{
72500 +
72501 +#ifdef CONFIG_PAX_USERCOPY
72502 + struct page *page;
72503 + struct kmem_cache *cachep = NULL;
72504 + struct slab *slabp;
72505 + unsigned int objnr;
72506 + unsigned long offset;
72507 + const char *type;
72508 +
72509 + if (!n)
72510 + return;
72511 +
72512 + type = "<null>";
72513 + if (ZERO_OR_NULL_PTR(ptr))
72514 + goto report;
72515 +
72516 + if (!virt_addr_valid(ptr))
72517 + return;
72518 +
72519 + page = virt_to_head_page(ptr);
72520 +
72521 + type = "<process stack>";
72522 + if (!PageSlab(page)) {
72523 + if (object_is_on_stack(ptr, n) == -1)
72524 + goto report;
72525 + return;
72526 + }
72527 +
72528 + cachep = page_get_cache(page);
72529 + type = cachep->name;
72530 + if (!(cachep->flags & SLAB_USERCOPY))
72531 + goto report;
72532 +
72533 + slabp = page_get_slab(page);
72534 + objnr = obj_to_index(cachep, slabp, ptr);
72535 + BUG_ON(objnr >= cachep->num);
72536 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72537 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72538 + return;
72539 +
72540 +report:
72541 + pax_report_usercopy(ptr, n, to, type);
72542 +#endif
72543 +
72544 +}
72545 +EXPORT_SYMBOL(check_object_size);
72546 +
72547 /**
72548 * ksize - get the actual amount of memory allocated for a given object
72549 * @objp: Pointer to the object
72550 diff --git a/mm/slob.c b/mm/slob.c
72551 index 8105be4..e045f96 100644
72552 --- a/mm/slob.c
72553 +++ b/mm/slob.c
72554 @@ -29,7 +29,7 @@
72555 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72556 * alloc_pages() directly, allocating compound pages so the page order
72557 * does not have to be separately tracked, and also stores the exact
72558 - * allocation size in page->private so that it can be used to accurately
72559 + * allocation size in slob_page->size so that it can be used to accurately
72560 * provide ksize(). These objects are detected in kfree() because slob_page()
72561 * is false for them.
72562 *
72563 @@ -58,6 +58,7 @@
72564 */
72565
72566 #include <linux/kernel.h>
72567 +#include <linux/sched.h>
72568 #include <linux/slab.h>
72569 #include <linux/mm.h>
72570 #include <linux/swap.h> /* struct reclaim_state */
72571 @@ -102,7 +103,8 @@ struct slob_page {
72572 unsigned long flags; /* mandatory */
72573 atomic_t _count; /* mandatory */
72574 slobidx_t units; /* free units left in page */
72575 - unsigned long pad[2];
72576 + unsigned long pad[1];
72577 + unsigned long size; /* size when >=PAGE_SIZE */
72578 slob_t *free; /* first free slob_t in page */
72579 struct list_head list; /* linked list of free pages */
72580 };
72581 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72582 */
72583 static inline int is_slob_page(struct slob_page *sp)
72584 {
72585 - return PageSlab((struct page *)sp);
72586 + return PageSlab((struct page *)sp) && !sp->size;
72587 }
72588
72589 static inline void set_slob_page(struct slob_page *sp)
72590 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72591
72592 static inline struct slob_page *slob_page(const void *addr)
72593 {
72594 - return (struct slob_page *)virt_to_page(addr);
72595 + return (struct slob_page *)virt_to_head_page(addr);
72596 }
72597
72598 /*
72599 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72600 /*
72601 * Return the size of a slob block.
72602 */
72603 -static slobidx_t slob_units(slob_t *s)
72604 +static slobidx_t slob_units(const slob_t *s)
72605 {
72606 if (s->units > 0)
72607 return s->units;
72608 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72609 /*
72610 * Return the next free slob block pointer after this one.
72611 */
72612 -static slob_t *slob_next(slob_t *s)
72613 +static slob_t *slob_next(const slob_t *s)
72614 {
72615 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72616 slobidx_t next;
72617 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72618 /*
72619 * Returns true if s is the last free block in its page.
72620 */
72621 -static int slob_last(slob_t *s)
72622 +static int slob_last(const slob_t *s)
72623 {
72624 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72625 }
72626 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72627 if (!page)
72628 return NULL;
72629
72630 + set_slob_page(page);
72631 return page_address(page);
72632 }
72633
72634 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72635 if (!b)
72636 return NULL;
72637 sp = slob_page(b);
72638 - set_slob_page(sp);
72639
72640 spin_lock_irqsave(&slob_lock, flags);
72641 sp->units = SLOB_UNITS(PAGE_SIZE);
72642 sp->free = b;
72643 + sp->size = 0;
72644 INIT_LIST_HEAD(&sp->list);
72645 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72646 set_slob_page_free(sp, slob_list);
72647 @@ -476,10 +479,9 @@ out:
72648 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72649 */
72650
72651 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72652 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72653 {
72654 - unsigned int *m;
72655 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72656 + slob_t *m;
72657 void *ret;
72658
72659 gfp &= gfp_allowed_mask;
72660 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72661
72662 if (!m)
72663 return NULL;
72664 - *m = size;
72665 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72666 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72667 + m[0].units = size;
72668 + m[1].units = align;
72669 ret = (void *)m + align;
72670
72671 trace_kmalloc_node(_RET_IP_, ret,
72672 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72673 gfp |= __GFP_COMP;
72674 ret = slob_new_pages(gfp, order, node);
72675 if (ret) {
72676 - struct page *page;
72677 - page = virt_to_page(ret);
72678 - page->private = size;
72679 + struct slob_page *sp;
72680 + sp = slob_page(ret);
72681 + sp->size = size;
72682 }
72683
72684 trace_kmalloc_node(_RET_IP_, ret,
72685 size, PAGE_SIZE << order, gfp, node);
72686 }
72687
72688 - kmemleak_alloc(ret, size, 1, gfp);
72689 + return ret;
72690 +}
72691 +
72692 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72693 +{
72694 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72695 + void *ret = __kmalloc_node_align(size, gfp, node, align);
72696 +
72697 + if (!ZERO_OR_NULL_PTR(ret))
72698 + kmemleak_alloc(ret, size, 1, gfp);
72699 return ret;
72700 }
72701 EXPORT_SYMBOL(__kmalloc_node);
72702 @@ -533,13 +547,92 @@ void kfree(const void *block)
72703 sp = slob_page(block);
72704 if (is_slob_page(sp)) {
72705 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72706 - unsigned int *m = (unsigned int *)(block - align);
72707 - slob_free(m, *m + align);
72708 - } else
72709 + slob_t *m = (slob_t *)(block - align);
72710 + slob_free(m, m[0].units + align);
72711 + } else {
72712 + clear_slob_page(sp);
72713 + free_slob_page(sp);
72714 + sp->size = 0;
72715 put_page(&sp->page);
72716 + }
72717 }
72718 EXPORT_SYMBOL(kfree);
72719
72720 +void check_object_size(const void *ptr, unsigned long n, bool to)
72721 +{
72722 +
72723 +#ifdef CONFIG_PAX_USERCOPY
72724 + struct slob_page *sp;
72725 + const slob_t *free;
72726 + const void *base;
72727 + unsigned long flags;
72728 + const char *type;
72729 +
72730 + if (!n)
72731 + return;
72732 +
72733 + type = "<null>";
72734 + if (ZERO_OR_NULL_PTR(ptr))
72735 + goto report;
72736 +
72737 + if (!virt_addr_valid(ptr))
72738 + return;
72739 +
72740 + type = "<process stack>";
72741 + sp = slob_page(ptr);
72742 + if (!PageSlab((struct page *)sp)) {
72743 + if (object_is_on_stack(ptr, n) == -1)
72744 + goto report;
72745 + return;
72746 + }
72747 +
72748 + type = "<slob>";
72749 + if (sp->size) {
72750 + base = page_address(&sp->page);
72751 + if (base <= ptr && n <= sp->size - (ptr - base))
72752 + return;
72753 + goto report;
72754 + }
72755 +
72756 + /* some tricky double walking to find the chunk */
72757 + spin_lock_irqsave(&slob_lock, flags);
72758 + base = (void *)((unsigned long)ptr & PAGE_MASK);
72759 + free = sp->free;
72760 +
72761 + while (!slob_last(free) && (void *)free <= ptr) {
72762 + base = free + slob_units(free);
72763 + free = slob_next(free);
72764 + }
72765 +
72766 + while (base < (void *)free) {
72767 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72768 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
72769 + int offset;
72770 +
72771 + if (ptr < base + align)
72772 + break;
72773 +
72774 + offset = ptr - base - align;
72775 + if (offset >= m) {
72776 + base += size;
72777 + continue;
72778 + }
72779 +
72780 + if (n > m - offset)
72781 + break;
72782 +
72783 + spin_unlock_irqrestore(&slob_lock, flags);
72784 + return;
72785 + }
72786 +
72787 + spin_unlock_irqrestore(&slob_lock, flags);
72788 +report:
72789 + pax_report_usercopy(ptr, n, to, type);
72790 +#endif
72791 +
72792 +}
72793 +EXPORT_SYMBOL(check_object_size);
72794 +
72795 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72796 size_t ksize(const void *block)
72797 {
72798 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
72799 sp = slob_page(block);
72800 if (is_slob_page(sp)) {
72801 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72802 - unsigned int *m = (unsigned int *)(block - align);
72803 - return SLOB_UNITS(*m) * SLOB_UNIT;
72804 + slob_t *m = (slob_t *)(block - align);
72805 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72806 } else
72807 - return sp->page.private;
72808 + return sp->size;
72809 }
72810 EXPORT_SYMBOL(ksize);
72811
72812 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72813 {
72814 struct kmem_cache *c;
72815
72816 +#ifdef CONFIG_PAX_USERCOPY
72817 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
72818 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72819 +#else
72820 c = slob_alloc(sizeof(struct kmem_cache),
72821 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72822 +#endif
72823
72824 if (c) {
72825 c->name = name;
72826 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72827
72828 lockdep_trace_alloc(flags);
72829
72830 +#ifdef CONFIG_PAX_USERCOPY
72831 + b = __kmalloc_node_align(c->size, flags, node, c->align);
72832 +#else
72833 if (c->size < PAGE_SIZE) {
72834 b = slob_alloc(c->size, flags, c->align, node);
72835 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72836 SLOB_UNITS(c->size) * SLOB_UNIT,
72837 flags, node);
72838 } else {
72839 + struct slob_page *sp;
72840 +
72841 b = slob_new_pages(flags, get_order(c->size), node);
72842 + sp = slob_page(b);
72843 + sp->size = c->size;
72844 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72845 PAGE_SIZE << get_order(c->size),
72846 flags, node);
72847 }
72848 +#endif
72849
72850 if (c->ctor)
72851 c->ctor(b);
72852 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72853
72854 static void __kmem_cache_free(void *b, int size)
72855 {
72856 - if (size < PAGE_SIZE)
72857 + struct slob_page *sp = slob_page(b);
72858 +
72859 + if (is_slob_page(sp))
72860 slob_free(b, size);
72861 - else
72862 + else {
72863 + clear_slob_page(sp);
72864 + free_slob_page(sp);
72865 + sp->size = 0;
72866 slob_free_pages(b, get_order(size));
72867 + }
72868 }
72869
72870 static void kmem_rcu_free(struct rcu_head *head)
72871 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
72872
72873 void kmem_cache_free(struct kmem_cache *c, void *b)
72874 {
72875 + int size = c->size;
72876 +
72877 +#ifdef CONFIG_PAX_USERCOPY
72878 + if (size + c->align < PAGE_SIZE) {
72879 + size += c->align;
72880 + b -= c->align;
72881 + }
72882 +#endif
72883 +
72884 kmemleak_free_recursive(b, c->flags);
72885 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
72886 struct slob_rcu *slob_rcu;
72887 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
72888 - slob_rcu->size = c->size;
72889 + slob_rcu = b + (size - sizeof(struct slob_rcu));
72890 + slob_rcu->size = size;
72891 call_rcu(&slob_rcu->head, kmem_rcu_free);
72892 } else {
72893 - __kmem_cache_free(b, c->size);
72894 + __kmem_cache_free(b, size);
72895 }
72896
72897 +#ifdef CONFIG_PAX_USERCOPY
72898 + trace_kfree(_RET_IP_, b);
72899 +#else
72900 trace_kmem_cache_free(_RET_IP_, b);
72901 +#endif
72902 +
72903 }
72904 EXPORT_SYMBOL(kmem_cache_free);
72905
72906 diff --git a/mm/slub.c b/mm/slub.c
72907 index 1a919f0..1739c9b 100644
72908 --- a/mm/slub.c
72909 +++ b/mm/slub.c
72910 @@ -208,7 +208,7 @@ struct track {
72911
72912 enum track_item { TRACK_ALLOC, TRACK_FREE };
72913
72914 -#ifdef CONFIG_SYSFS
72915 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
72916 static int sysfs_slab_add(struct kmem_cache *);
72917 static int sysfs_slab_alias(struct kmem_cache *, const char *);
72918 static void sysfs_slab_remove(struct kmem_cache *);
72919 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
72920 if (!t->addr)
72921 return;
72922
72923 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
72924 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
72925 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
72926 #ifdef CONFIG_STACKTRACE
72927 {
72928 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
72929
72930 page = virt_to_head_page(x);
72931
72932 + BUG_ON(!PageSlab(page));
72933 +
72934 slab_free(s, page, x, _RET_IP_);
72935
72936 trace_kmem_cache_free(_RET_IP_, x);
72937 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
72938 * Merge control. If this is set then no merging of slab caches will occur.
72939 * (Could be removed. This was introduced to pacify the merge skeptics.)
72940 */
72941 -static int slub_nomerge;
72942 +static int slub_nomerge = 1;
72943
72944 /*
72945 * Calculate the order of allocation given an slab object size.
72946 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
72947 else
72948 s->cpu_partial = 30;
72949
72950 - s->refcount = 1;
72951 + atomic_set(&s->refcount, 1);
72952 #ifdef CONFIG_NUMA
72953 s->remote_node_defrag_ratio = 1000;
72954 #endif
72955 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
72956 void kmem_cache_destroy(struct kmem_cache *s)
72957 {
72958 down_write(&slub_lock);
72959 - s->refcount--;
72960 - if (!s->refcount) {
72961 + if (atomic_dec_and_test(&s->refcount)) {
72962 list_del(&s->list);
72963 up_write(&slub_lock);
72964 if (kmem_cache_close(s)) {
72965 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
72966 EXPORT_SYMBOL(__kmalloc_node);
72967 #endif
72968
72969 +void check_object_size(const void *ptr, unsigned long n, bool to)
72970 +{
72971 +
72972 +#ifdef CONFIG_PAX_USERCOPY
72973 + struct page *page;
72974 + struct kmem_cache *s = NULL;
72975 + unsigned long offset;
72976 + const char *type;
72977 +
72978 + if (!n)
72979 + return;
72980 +
72981 + type = "<null>";
72982 + if (ZERO_OR_NULL_PTR(ptr))
72983 + goto report;
72984 +
72985 + if (!virt_addr_valid(ptr))
72986 + return;
72987 +
72988 + page = virt_to_head_page(ptr);
72989 +
72990 + type = "<process stack>";
72991 + if (!PageSlab(page)) {
72992 + if (object_is_on_stack(ptr, n) == -1)
72993 + goto report;
72994 + return;
72995 + }
72996 +
72997 + s = page->slab;
72998 + type = s->name;
72999 + if (!(s->flags & SLAB_USERCOPY))
73000 + goto report;
73001 +
73002 + offset = (ptr - page_address(page)) % s->size;
73003 + if (offset <= s->objsize && n <= s->objsize - offset)
73004 + return;
73005 +
73006 +report:
73007 + pax_report_usercopy(ptr, n, to, type);
73008 +#endif
73009 +
73010 +}
73011 +EXPORT_SYMBOL(check_object_size);
73012 +
73013 size_t ksize(const void *object)
73014 {
73015 struct page *page;
73016 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73017 int node;
73018
73019 list_add(&s->list, &slab_caches);
73020 - s->refcount = -1;
73021 + atomic_set(&s->refcount, -1);
73022
73023 for_each_node_state(node, N_NORMAL_MEMORY) {
73024 struct kmem_cache_node *n = get_node(s, node);
73025 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
73026
73027 /* Caches that are not of the two-to-the-power-of size */
73028 if (KMALLOC_MIN_SIZE <= 32) {
73029 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73030 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73031 caches++;
73032 }
73033
73034 if (KMALLOC_MIN_SIZE <= 64) {
73035 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73036 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73037 caches++;
73038 }
73039
73040 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73041 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73042 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73043 caches++;
73044 }
73045
73046 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73047 /*
73048 * We may have set a slab to be unmergeable during bootstrap.
73049 */
73050 - if (s->refcount < 0)
73051 + if (atomic_read(&s->refcount) < 0)
73052 return 1;
73053
73054 return 0;
73055 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73056 down_write(&slub_lock);
73057 s = find_mergeable(size, align, flags, name, ctor);
73058 if (s) {
73059 - s->refcount++;
73060 + atomic_inc(&s->refcount);
73061 /*
73062 * Adjust the object sizes so that we clear
73063 * the complete object on kzalloc.
73064 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73065 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73066
73067 if (sysfs_slab_alias(s, name)) {
73068 - s->refcount--;
73069 + atomic_dec(&s->refcount);
73070 goto err;
73071 }
73072 up_write(&slub_lock);
73073 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73074 }
73075 #endif
73076
73077 -#ifdef CONFIG_SYSFS
73078 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73079 static int count_inuse(struct page *page)
73080 {
73081 return page->inuse;
73082 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
73083 validate_slab_cache(kmalloc_caches[9]);
73084 }
73085 #else
73086 -#ifdef CONFIG_SYSFS
73087 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73088 static void resiliency_test(void) {};
73089 #endif
73090 #endif
73091
73092 -#ifdef CONFIG_SYSFS
73093 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73094 enum slab_stat_type {
73095 SL_ALL, /* All slabs */
73096 SL_PARTIAL, /* Only partially allocated slabs */
73097 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
73098
73099 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73100 {
73101 - return sprintf(buf, "%d\n", s->refcount - 1);
73102 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73103 }
73104 SLAB_ATTR_RO(aliases);
73105
73106 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
73107 return name;
73108 }
73109
73110 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73111 static int sysfs_slab_add(struct kmem_cache *s)
73112 {
73113 int err;
73114 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73115 kobject_del(&s->kobj);
73116 kobject_put(&s->kobj);
73117 }
73118 +#endif
73119
73120 /*
73121 * Need to buffer aliases during bootup until sysfs becomes
73122 @@ -5298,6 +5345,7 @@ struct saved_alias {
73123
73124 static struct saved_alias *alias_list;
73125
73126 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73127 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73128 {
73129 struct saved_alias *al;
73130 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73131 alias_list = al;
73132 return 0;
73133 }
73134 +#endif
73135
73136 static int __init slab_sysfs_init(void)
73137 {
73138 diff --git a/mm/swap.c b/mm/swap.c
73139 index 55b266d..a532537 100644
73140 --- a/mm/swap.c
73141 +++ b/mm/swap.c
73142 @@ -31,6 +31,7 @@
73143 #include <linux/backing-dev.h>
73144 #include <linux/memcontrol.h>
73145 #include <linux/gfp.h>
73146 +#include <linux/hugetlb.h>
73147
73148 #include "internal.h"
73149
73150 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
73151
73152 __page_cache_release(page);
73153 dtor = get_compound_page_dtor(page);
73154 + if (!PageHuge(page))
73155 + BUG_ON(dtor != free_compound_page);
73156 (*dtor)(page);
73157 }
73158
73159 diff --git a/mm/swapfile.c b/mm/swapfile.c
73160 index b1cd120..aaae885 100644
73161 --- a/mm/swapfile.c
73162 +++ b/mm/swapfile.c
73163 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73164
73165 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73166 /* Activity counter to indicate that a swapon or swapoff has occurred */
73167 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73168 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73169
73170 static inline unsigned char swap_count(unsigned char ent)
73171 {
73172 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73173 }
73174 filp_close(swap_file, NULL);
73175 err = 0;
73176 - atomic_inc(&proc_poll_event);
73177 + atomic_inc_unchecked(&proc_poll_event);
73178 wake_up_interruptible(&proc_poll_wait);
73179
73180 out_dput:
73181 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73182
73183 poll_wait(file, &proc_poll_wait, wait);
73184
73185 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73186 - seq->poll_event = atomic_read(&proc_poll_event);
73187 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73188 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73189 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73190 }
73191
73192 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73193 return ret;
73194
73195 seq = file->private_data;
73196 - seq->poll_event = atomic_read(&proc_poll_event);
73197 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73198 return 0;
73199 }
73200
73201 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73202 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73203
73204 mutex_unlock(&swapon_mutex);
73205 - atomic_inc(&proc_poll_event);
73206 + atomic_inc_unchecked(&proc_poll_event);
73207 wake_up_interruptible(&proc_poll_wait);
73208
73209 if (S_ISREG(inode->i_mode))
73210 diff --git a/mm/util.c b/mm/util.c
73211 index 136ac4f..5117eef 100644
73212 --- a/mm/util.c
73213 +++ b/mm/util.c
73214 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
73215 * allocated buffer. Use this if you don't want to free the buffer immediately
73216 * like, for example, with RCU.
73217 */
73218 +#undef __krealloc
73219 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
73220 {
73221 void *ret;
73222 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
73223 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
73224 * %NULL pointer, the object pointed to is freed.
73225 */
73226 +#undef krealloc
73227 void *krealloc(const void *p, size_t new_size, gfp_t flags)
73228 {
73229 void *ret;
73230 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
73231 void arch_pick_mmap_layout(struct mm_struct *mm)
73232 {
73233 mm->mmap_base = TASK_UNMAPPED_BASE;
73234 +
73235 +#ifdef CONFIG_PAX_RANDMMAP
73236 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73237 + mm->mmap_base += mm->delta_mmap;
73238 +#endif
73239 +
73240 mm->get_unmapped_area = arch_get_unmapped_area;
73241 mm->unmap_area = arch_unmap_area;
73242 }
73243 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73244 index 27be2f0..0aef2c2 100644
73245 --- a/mm/vmalloc.c
73246 +++ b/mm/vmalloc.c
73247 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73248
73249 pte = pte_offset_kernel(pmd, addr);
73250 do {
73251 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73252 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73253 +
73254 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73255 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73256 + BUG_ON(!pte_exec(*pte));
73257 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73258 + continue;
73259 + }
73260 +#endif
73261 +
73262 + {
73263 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73264 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73265 + }
73266 } while (pte++, addr += PAGE_SIZE, addr != end);
73267 }
73268
73269 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73270 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73271 {
73272 pte_t *pte;
73273 + int ret = -ENOMEM;
73274
73275 /*
73276 * nr is a running index into the array which helps higher level
73277 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73278 pte = pte_alloc_kernel(pmd, addr);
73279 if (!pte)
73280 return -ENOMEM;
73281 +
73282 + pax_open_kernel();
73283 do {
73284 struct page *page = pages[*nr];
73285
73286 - if (WARN_ON(!pte_none(*pte)))
73287 - return -EBUSY;
73288 - if (WARN_ON(!page))
73289 - return -ENOMEM;
73290 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73291 + if (pgprot_val(prot) & _PAGE_NX)
73292 +#endif
73293 +
73294 + if (WARN_ON(!pte_none(*pte))) {
73295 + ret = -EBUSY;
73296 + goto out;
73297 + }
73298 + if (WARN_ON(!page)) {
73299 + ret = -ENOMEM;
73300 + goto out;
73301 + }
73302 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73303 (*nr)++;
73304 } while (pte++, addr += PAGE_SIZE, addr != end);
73305 - return 0;
73306 + ret = 0;
73307 +out:
73308 + pax_close_kernel();
73309 + return ret;
73310 }
73311
73312 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73313 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73314 * and fall back on vmalloc() if that fails. Others
73315 * just put it in the vmalloc space.
73316 */
73317 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73318 +#ifdef CONFIG_MODULES
73319 +#ifdef MODULES_VADDR
73320 unsigned long addr = (unsigned long)x;
73321 if (addr >= MODULES_VADDR && addr < MODULES_END)
73322 return 1;
73323 #endif
73324 +
73325 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73326 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73327 + return 1;
73328 +#endif
73329 +
73330 +#endif
73331 +
73332 return is_vmalloc_addr(x);
73333 }
73334
73335 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73336
73337 if (!pgd_none(*pgd)) {
73338 pud_t *pud = pud_offset(pgd, addr);
73339 +#ifdef CONFIG_X86
73340 + if (!pud_large(*pud))
73341 +#endif
73342 if (!pud_none(*pud)) {
73343 pmd_t *pmd = pmd_offset(pud, addr);
73344 +#ifdef CONFIG_X86
73345 + if (!pmd_large(*pmd))
73346 +#endif
73347 if (!pmd_none(*pmd)) {
73348 pte_t *ptep, pte;
73349
73350 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73351 struct vm_struct *area;
73352
73353 BUG_ON(in_interrupt());
73354 +
73355 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73356 + if (flags & VM_KERNEXEC) {
73357 + if (start != VMALLOC_START || end != VMALLOC_END)
73358 + return NULL;
73359 + start = (unsigned long)MODULES_EXEC_VADDR;
73360 + end = (unsigned long)MODULES_EXEC_END;
73361 + }
73362 +#endif
73363 +
73364 if (flags & VM_IOREMAP) {
73365 int bit = fls(size);
73366
73367 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
73368 if (count > totalram_pages)
73369 return NULL;
73370
73371 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73372 + if (!(pgprot_val(prot) & _PAGE_NX))
73373 + flags |= VM_KERNEXEC;
73374 +#endif
73375 +
73376 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73377 __builtin_return_address(0));
73378 if (!area)
73379 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73380 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73381 goto fail;
73382
73383 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73384 + if (!(pgprot_val(prot) & _PAGE_NX))
73385 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73386 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73387 + else
73388 +#endif
73389 +
73390 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73391 start, end, node, gfp_mask, caller);
73392 if (!area)
73393 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
73394 gfp_mask, prot, node, caller);
73395 }
73396
73397 +#undef __vmalloc
73398 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
73399 {
73400 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
73401 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
73402 * For tight control over page level allocator and protection flags
73403 * use __vmalloc() instead.
73404 */
73405 +#undef vmalloc
73406 void *vmalloc(unsigned long size)
73407 {
73408 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
73409 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
73410 * For tight control over page level allocator and protection flags
73411 * use __vmalloc() instead.
73412 */
73413 +#undef vzalloc
73414 void *vzalloc(unsigned long size)
73415 {
73416 return __vmalloc_node_flags(size, -1,
73417 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
73418 * The resulting memory area is zeroed so it can be mapped to userspace
73419 * without leaking data.
73420 */
73421 +#undef vmalloc_user
73422 void *vmalloc_user(unsigned long size)
73423 {
73424 struct vm_struct *area;
73425 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
73426 * For tight control over page level allocator and protection flags
73427 * use __vmalloc() instead.
73428 */
73429 +#undef vmalloc_node
73430 void *vmalloc_node(unsigned long size, int node)
73431 {
73432 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
73433 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
73434 * For tight control over page level allocator and protection flags
73435 * use __vmalloc_node() instead.
73436 */
73437 +#undef vzalloc_node
73438 void *vzalloc_node(unsigned long size, int node)
73439 {
73440 return __vmalloc_node_flags(size, node,
73441 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
73442 * For tight control over page level allocator and protection flags
73443 * use __vmalloc() instead.
73444 */
73445 -
73446 +#undef vmalloc_exec
73447 void *vmalloc_exec(unsigned long size)
73448 {
73449 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73450 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73451 -1, __builtin_return_address(0));
73452 }
73453
73454 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
73455 * Allocate enough 32bit PA addressable pages to cover @size from the
73456 * page level allocator and map them into contiguous kernel virtual space.
73457 */
73458 +#undef vmalloc_32
73459 void *vmalloc_32(unsigned long size)
73460 {
73461 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
73462 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
73463 * The resulting memory area is 32bit addressable and zeroed so it can be
73464 * mapped to userspace without leaking data.
73465 */
73466 +#undef vmalloc_32_user
73467 void *vmalloc_32_user(unsigned long size)
73468 {
73469 struct vm_struct *area;
73470 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73471 unsigned long uaddr = vma->vm_start;
73472 unsigned long usize = vma->vm_end - vma->vm_start;
73473
73474 + BUG_ON(vma->vm_mirror);
73475 +
73476 if ((PAGE_SIZE-1) & (unsigned long)addr)
73477 return -EINVAL;
73478
73479 diff --git a/mm/vmstat.c b/mm/vmstat.c
73480 index 8fd603b..cf0d930 100644
73481 --- a/mm/vmstat.c
73482 +++ b/mm/vmstat.c
73483 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73484 *
73485 * vm_stat contains the global counters
73486 */
73487 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73488 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73489 EXPORT_SYMBOL(vm_stat);
73490
73491 #ifdef CONFIG_SMP
73492 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73493 v = p->vm_stat_diff[i];
73494 p->vm_stat_diff[i] = 0;
73495 local_irq_restore(flags);
73496 - atomic_long_add(v, &zone->vm_stat[i]);
73497 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73498 global_diff[i] += v;
73499 #ifdef CONFIG_NUMA
73500 /* 3 seconds idle till flush */
73501 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73502
73503 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73504 if (global_diff[i])
73505 - atomic_long_add(global_diff[i], &vm_stat[i]);
73506 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73507 }
73508
73509 #endif
73510 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73511 start_cpu_timer(cpu);
73512 #endif
73513 #ifdef CONFIG_PROC_FS
73514 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73515 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73516 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73517 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73518 + {
73519 + mode_t gr_mode = S_IRUGO;
73520 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73521 + gr_mode = S_IRUSR;
73522 +#endif
73523 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73524 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73525 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73526 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73527 +#else
73528 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73529 +#endif
73530 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73531 + }
73532 #endif
73533 return 0;
73534 }
73535 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73536 index 5471628..cef8398 100644
73537 --- a/net/8021q/vlan.c
73538 +++ b/net/8021q/vlan.c
73539 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73540 err = -EPERM;
73541 if (!capable(CAP_NET_ADMIN))
73542 break;
73543 - if ((args.u.name_type >= 0) &&
73544 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73545 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73546 struct vlan_net *vn;
73547
73548 vn = net_generic(net, vlan_net_id);
73549 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73550 index fdfdb57..38d368c 100644
73551 --- a/net/9p/trans_fd.c
73552 +++ b/net/9p/trans_fd.c
73553 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73554 oldfs = get_fs();
73555 set_fs(get_ds());
73556 /* The cast to a user pointer is valid due to the set_fs() */
73557 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73558 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73559 set_fs(oldfs);
73560
73561 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73562 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73563 index f41f026..fe76ea8 100644
73564 --- a/net/atm/atm_misc.c
73565 +++ b/net/atm/atm_misc.c
73566 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73567 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73568 return 1;
73569 atm_return(vcc, truesize);
73570 - atomic_inc(&vcc->stats->rx_drop);
73571 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73572 return 0;
73573 }
73574 EXPORT_SYMBOL(atm_charge);
73575 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73576 }
73577 }
73578 atm_return(vcc, guess);
73579 - atomic_inc(&vcc->stats->rx_drop);
73580 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73581 return NULL;
73582 }
73583 EXPORT_SYMBOL(atm_alloc_charge);
73584 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73585
73586 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73587 {
73588 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73589 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73590 __SONET_ITEMS
73591 #undef __HANDLE_ITEM
73592 }
73593 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73594
73595 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73596 {
73597 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73598 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73599 __SONET_ITEMS
73600 #undef __HANDLE_ITEM
73601 }
73602 diff --git a/net/atm/lec.h b/net/atm/lec.h
73603 index dfc0719..47c5322 100644
73604 --- a/net/atm/lec.h
73605 +++ b/net/atm/lec.h
73606 @@ -48,7 +48,7 @@ struct lane2_ops {
73607 const u8 *tlvs, u32 sizeoftlvs);
73608 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73609 const u8 *tlvs, u32 sizeoftlvs);
73610 -};
73611 +} __no_const;
73612
73613 /*
73614 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73615 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73616 index 0919a88..a23d54e 100644
73617 --- a/net/atm/mpc.h
73618 +++ b/net/atm/mpc.h
73619 @@ -33,7 +33,7 @@ struct mpoa_client {
73620 struct mpc_parameters parameters; /* parameters for this client */
73621
73622 const struct net_device_ops *old_ops;
73623 - struct net_device_ops new_ops;
73624 + net_device_ops_no_const new_ops;
73625 };
73626
73627
73628 diff --git a/net/atm/proc.c b/net/atm/proc.c
73629 index 0d020de..011c7bb 100644
73630 --- a/net/atm/proc.c
73631 +++ b/net/atm/proc.c
73632 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73633 const struct k_atm_aal_stats *stats)
73634 {
73635 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73636 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73637 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73638 - atomic_read(&stats->rx_drop));
73639 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73640 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73641 + atomic_read_unchecked(&stats->rx_drop));
73642 }
73643
73644 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73645 diff --git a/net/atm/resources.c b/net/atm/resources.c
73646 index 23f45ce..c748f1a 100644
73647 --- a/net/atm/resources.c
73648 +++ b/net/atm/resources.c
73649 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73650 static void copy_aal_stats(struct k_atm_aal_stats *from,
73651 struct atm_aal_stats *to)
73652 {
73653 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73654 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73655 __AAL_STAT_ITEMS
73656 #undef __HANDLE_ITEM
73657 }
73658 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73659 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73660 struct atm_aal_stats *to)
73661 {
73662 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73663 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73664 __AAL_STAT_ITEMS
73665 #undef __HANDLE_ITEM
73666 }
73667 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73668 index 3512e25..2b33401 100644
73669 --- a/net/batman-adv/bat_iv_ogm.c
73670 +++ b/net/batman-adv/bat_iv_ogm.c
73671 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73672
73673 /* change sequence number to network order */
73674 batman_ogm_packet->seqno =
73675 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
73676 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73677
73678 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73679 batman_ogm_packet->tt_crc = htons((uint16_t)
73680 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73681 else
73682 batman_ogm_packet->gw_flags = NO_FLAGS;
73683
73684 - atomic_inc(&hard_iface->seqno);
73685 + atomic_inc_unchecked(&hard_iface->seqno);
73686
73687 slide_own_bcast_window(hard_iface);
73688 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73689 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
73690 return;
73691
73692 /* could be changed by schedule_own_packet() */
73693 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
73694 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73695
73696 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73697
73698 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73699 index 7704df4..beb4e16 100644
73700 --- a/net/batman-adv/hard-interface.c
73701 +++ b/net/batman-adv/hard-interface.c
73702 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73703 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73704 dev_add_pack(&hard_iface->batman_adv_ptype);
73705
73706 - atomic_set(&hard_iface->seqno, 1);
73707 - atomic_set(&hard_iface->frag_seqno, 1);
73708 + atomic_set_unchecked(&hard_iface->seqno, 1);
73709 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73710 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73711 hard_iface->net_dev->name);
73712
73713 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73714 index f9cc957..efd9dae 100644
73715 --- a/net/batman-adv/soft-interface.c
73716 +++ b/net/batman-adv/soft-interface.c
73717 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73718
73719 /* set broadcast sequence number */
73720 bcast_packet->seqno =
73721 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73722 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73723
73724 add_bcast_packet_to_list(bat_priv, skb, 1);
73725
73726 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
73727 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73728
73729 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73730 - atomic_set(&bat_priv->bcast_seqno, 1);
73731 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73732 atomic_set(&bat_priv->ttvn, 0);
73733 atomic_set(&bat_priv->tt_local_changes, 0);
73734 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73735 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73736 index ab8d0fe..ceba3fd 100644
73737 --- a/net/batman-adv/types.h
73738 +++ b/net/batman-adv/types.h
73739 @@ -38,8 +38,8 @@ struct hard_iface {
73740 int16_t if_num;
73741 char if_status;
73742 struct net_device *net_dev;
73743 - atomic_t seqno;
73744 - atomic_t frag_seqno;
73745 + atomic_unchecked_t seqno;
73746 + atomic_unchecked_t frag_seqno;
73747 unsigned char *packet_buff;
73748 int packet_len;
73749 struct kobject *hardif_obj;
73750 @@ -154,7 +154,7 @@ struct bat_priv {
73751 atomic_t orig_interval; /* uint */
73752 atomic_t hop_penalty; /* uint */
73753 atomic_t log_level; /* uint */
73754 - atomic_t bcast_seqno;
73755 + atomic_unchecked_t bcast_seqno;
73756 atomic_t bcast_queue_left;
73757 atomic_t batman_queue_left;
73758 atomic_t ttvn; /* translation table version number */
73759 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73760 index 07d1c1d..7e9bea9 100644
73761 --- a/net/batman-adv/unicast.c
73762 +++ b/net/batman-adv/unicast.c
73763 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73764 frag1->flags = UNI_FRAG_HEAD | large_tail;
73765 frag2->flags = large_tail;
73766
73767 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73768 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73769 frag1->seqno = htons(seqno - 1);
73770 frag2->seqno = htons(seqno);
73771
73772 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73773 index c1c597e..05ebb40 100644
73774 --- a/net/bluetooth/hci_conn.c
73775 +++ b/net/bluetooth/hci_conn.c
73776 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73777 memset(&cp, 0, sizeof(cp));
73778
73779 cp.handle = cpu_to_le16(conn->handle);
73780 - memcpy(cp.ltk, ltk, sizeof(ltk));
73781 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73782
73783 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73784 }
73785 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73786 index 17b5b1c..826d872 100644
73787 --- a/net/bluetooth/l2cap_core.c
73788 +++ b/net/bluetooth/l2cap_core.c
73789 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73790 break;
73791
73792 case L2CAP_CONF_RFC:
73793 - if (olen == sizeof(rfc))
73794 - memcpy(&rfc, (void *)val, olen);
73795 + if (olen != sizeof(rfc))
73796 + break;
73797 +
73798 + memcpy(&rfc, (void *)val, olen);
73799
73800 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73801 rfc.mode != chan->mode)
73802 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73803
73804 switch (type) {
73805 case L2CAP_CONF_RFC:
73806 - if (olen == sizeof(rfc))
73807 - memcpy(&rfc, (void *)val, olen);
73808 + if (olen != sizeof(rfc))
73809 + break;
73810 +
73811 + memcpy(&rfc, (void *)val, olen);
73812 goto done;
73813 }
73814 }
73815 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
73816 index a5f4e57..910ee6d 100644
73817 --- a/net/bridge/br_multicast.c
73818 +++ b/net/bridge/br_multicast.c
73819 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
73820 nexthdr = ip6h->nexthdr;
73821 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
73822
73823 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
73824 + if (nexthdr != IPPROTO_ICMPV6)
73825 return 0;
73826
73827 /* Okay, we found ICMPv6 header */
73828 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73829 index 5864cc4..121f3a30 100644
73830 --- a/net/bridge/netfilter/ebtables.c
73831 +++ b/net/bridge/netfilter/ebtables.c
73832 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73833 tmp.valid_hooks = t->table->valid_hooks;
73834 }
73835 mutex_unlock(&ebt_mutex);
73836 - if (copy_to_user(user, &tmp, *len) != 0){
73837 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73838 BUGPRINT("c2u Didn't work\n");
73839 ret = -EFAULT;
73840 break;
73841 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
73842 index a986280..13444a1 100644
73843 --- a/net/caif/caif_socket.c
73844 +++ b/net/caif/caif_socket.c
73845 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
73846 #ifdef CONFIG_DEBUG_FS
73847 struct debug_fs_counter {
73848 atomic_t caif_nr_socks;
73849 - atomic_t caif_sock_create;
73850 - atomic_t num_connect_req;
73851 - atomic_t num_connect_resp;
73852 - atomic_t num_connect_fail_resp;
73853 - atomic_t num_disconnect;
73854 - atomic_t num_remote_shutdown_ind;
73855 - atomic_t num_tx_flow_off_ind;
73856 - atomic_t num_tx_flow_on_ind;
73857 - atomic_t num_rx_flow_off;
73858 - atomic_t num_rx_flow_on;
73859 + atomic_unchecked_t caif_sock_create;
73860 + atomic_unchecked_t num_connect_req;
73861 + atomic_unchecked_t num_connect_resp;
73862 + atomic_unchecked_t num_connect_fail_resp;
73863 + atomic_unchecked_t num_disconnect;
73864 + atomic_unchecked_t num_remote_shutdown_ind;
73865 + atomic_unchecked_t num_tx_flow_off_ind;
73866 + atomic_unchecked_t num_tx_flow_on_ind;
73867 + atomic_unchecked_t num_rx_flow_off;
73868 + atomic_unchecked_t num_rx_flow_on;
73869 };
73870 static struct debug_fs_counter cnt;
73871 #define dbfs_atomic_inc(v) atomic_inc_return(v)
73872 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
73873 #define dbfs_atomic_dec(v) atomic_dec_return(v)
73874 #else
73875 #define dbfs_atomic_inc(v) 0
73876 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73877 atomic_read(&cf_sk->sk.sk_rmem_alloc),
73878 sk_rcvbuf_lowwater(cf_sk));
73879 set_rx_flow_off(cf_sk);
73880 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
73881 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
73882 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
73883 }
73884
73885 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73886 set_rx_flow_off(cf_sk);
73887 if (net_ratelimit())
73888 pr_debug("sending flow OFF due to rmem_schedule\n");
73889 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
73890 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
73891 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
73892 }
73893 skb->dev = NULL;
73894 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
73895 switch (flow) {
73896 case CAIF_CTRLCMD_FLOW_ON_IND:
73897 /* OK from modem to start sending again */
73898 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
73899 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
73900 set_tx_flow_on(cf_sk);
73901 cf_sk->sk.sk_state_change(&cf_sk->sk);
73902 break;
73903
73904 case CAIF_CTRLCMD_FLOW_OFF_IND:
73905 /* Modem asks us to shut up */
73906 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
73907 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
73908 set_tx_flow_off(cf_sk);
73909 cf_sk->sk.sk_state_change(&cf_sk->sk);
73910 break;
73911 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73912 /* We're now connected */
73913 caif_client_register_refcnt(&cf_sk->layer,
73914 cfsk_hold, cfsk_put);
73915 - dbfs_atomic_inc(&cnt.num_connect_resp);
73916 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
73917 cf_sk->sk.sk_state = CAIF_CONNECTED;
73918 set_tx_flow_on(cf_sk);
73919 cf_sk->sk.sk_state_change(&cf_sk->sk);
73920 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73921
73922 case CAIF_CTRLCMD_INIT_FAIL_RSP:
73923 /* Connect request failed */
73924 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
73925 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
73926 cf_sk->sk.sk_err = ECONNREFUSED;
73927 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
73928 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
73929 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
73930
73931 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
73932 /* Modem has closed this connection, or device is down. */
73933 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
73934 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
73935 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
73936 cf_sk->sk.sk_err = ECONNRESET;
73937 set_rx_flow_on(cf_sk);
73938 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
73939 return;
73940
73941 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
73942 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
73943 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
73944 set_rx_flow_on(cf_sk);
73945 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
73946 }
73947 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
73948 /*ifindex = id of the interface.*/
73949 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
73950
73951 - dbfs_atomic_inc(&cnt.num_connect_req);
73952 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
73953 cf_sk->layer.receive = caif_sktrecv_cb;
73954
73955 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
73956 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
73957 spin_unlock_bh(&sk->sk_receive_queue.lock);
73958 sock->sk = NULL;
73959
73960 - dbfs_atomic_inc(&cnt.num_disconnect);
73961 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
73962
73963 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
73964 if (cf_sk->debugfs_socket_dir != NULL)
73965 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
73966 cf_sk->conn_req.protocol = protocol;
73967 /* Increase the number of sockets created. */
73968 dbfs_atomic_inc(&cnt.caif_nr_socks);
73969 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
73970 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
73971 #ifdef CONFIG_DEBUG_FS
73972 if (!IS_ERR(debugfsdir)) {
73973
73974 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
73975 index 5cf5222..6f704ad 100644
73976 --- a/net/caif/cfctrl.c
73977 +++ b/net/caif/cfctrl.c
73978 @@ -9,6 +9,7 @@
73979 #include <linux/stddef.h>
73980 #include <linux/spinlock.h>
73981 #include <linux/slab.h>
73982 +#include <linux/sched.h>
73983 #include <net/caif/caif_layer.h>
73984 #include <net/caif/cfpkt.h>
73985 #include <net/caif/cfctrl.h>
73986 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
73987 memset(&dev_info, 0, sizeof(dev_info));
73988 dev_info.id = 0xff;
73989 cfsrvl_init(&this->serv, 0, &dev_info, false);
73990 - atomic_set(&this->req_seq_no, 1);
73991 - atomic_set(&this->rsp_seq_no, 1);
73992 + atomic_set_unchecked(&this->req_seq_no, 1);
73993 + atomic_set_unchecked(&this->rsp_seq_no, 1);
73994 this->serv.layer.receive = cfctrl_recv;
73995 sprintf(this->serv.layer.name, "ctrl");
73996 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
73997 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
73998 struct cfctrl_request_info *req)
73999 {
74000 spin_lock_bh(&ctrl->info_list_lock);
74001 - atomic_inc(&ctrl->req_seq_no);
74002 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74003 + atomic_inc_unchecked(&ctrl->req_seq_no);
74004 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74005 list_add_tail(&req->list, &ctrl->list);
74006 spin_unlock_bh(&ctrl->info_list_lock);
74007 }
74008 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74009 if (p != first)
74010 pr_warn("Requests are not received in order\n");
74011
74012 - atomic_set(&ctrl->rsp_seq_no,
74013 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74014 p->sequence_no);
74015 list_del(&p->list);
74016 goto out;
74017 diff --git a/net/can/gw.c b/net/can/gw.c
74018 index 3d79b12..8de85fa 100644
74019 --- a/net/can/gw.c
74020 +++ b/net/can/gw.c
74021 @@ -96,7 +96,7 @@ struct cf_mod {
74022 struct {
74023 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74024 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74025 - } csumfunc;
74026 + } __no_const csumfunc;
74027 };
74028
74029
74030 diff --git a/net/compat.c b/net/compat.c
74031 index 6def90e..c6992fa 100644
74032 --- a/net/compat.c
74033 +++ b/net/compat.c
74034 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74035 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74036 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74037 return -EFAULT;
74038 - kmsg->msg_name = compat_ptr(tmp1);
74039 - kmsg->msg_iov = compat_ptr(tmp2);
74040 - kmsg->msg_control = compat_ptr(tmp3);
74041 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74042 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74043 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74044 return 0;
74045 }
74046
74047 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74048
74049 if (kern_msg->msg_namelen) {
74050 if (mode == VERIFY_READ) {
74051 - int err = move_addr_to_kernel(kern_msg->msg_name,
74052 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74053 kern_msg->msg_namelen,
74054 kern_address);
74055 if (err < 0)
74056 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74057 kern_msg->msg_name = NULL;
74058
74059 tot_len = iov_from_user_compat_to_kern(kern_iov,
74060 - (struct compat_iovec __user *)kern_msg->msg_iov,
74061 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
74062 kern_msg->msg_iovlen);
74063 if (tot_len >= 0)
74064 kern_msg->msg_iov = kern_iov;
74065 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74066
74067 #define CMSG_COMPAT_FIRSTHDR(msg) \
74068 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74069 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74070 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74071 (struct compat_cmsghdr __user *)NULL)
74072
74073 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74074 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74075 (ucmlen) <= (unsigned long) \
74076 ((mhdr)->msg_controllen - \
74077 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74078 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74079
74080 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74081 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74082 {
74083 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74084 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74085 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74086 msg->msg_controllen)
74087 return NULL;
74088 return (struct compat_cmsghdr __user *)ptr;
74089 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74090 {
74091 struct compat_timeval ctv;
74092 struct compat_timespec cts[3];
74093 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74094 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74095 struct compat_cmsghdr cmhdr;
74096 int cmlen;
74097
74098 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74099
74100 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74101 {
74102 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74103 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74104 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74105 int fdnum = scm->fp->count;
74106 struct file **fp = scm->fp->fp;
74107 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74108 return -EFAULT;
74109 old_fs = get_fs();
74110 set_fs(KERNEL_DS);
74111 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74112 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74113 set_fs(old_fs);
74114
74115 return err;
74116 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74117 len = sizeof(ktime);
74118 old_fs = get_fs();
74119 set_fs(KERNEL_DS);
74120 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74121 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74122 set_fs(old_fs);
74123
74124 if (!err) {
74125 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74126 case MCAST_JOIN_GROUP:
74127 case MCAST_LEAVE_GROUP:
74128 {
74129 - struct compat_group_req __user *gr32 = (void *)optval;
74130 + struct compat_group_req __user *gr32 = (void __user *)optval;
74131 struct group_req __user *kgr =
74132 compat_alloc_user_space(sizeof(struct group_req));
74133 u32 interface;
74134 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74135 case MCAST_BLOCK_SOURCE:
74136 case MCAST_UNBLOCK_SOURCE:
74137 {
74138 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74139 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74140 struct group_source_req __user *kgsr = compat_alloc_user_space(
74141 sizeof(struct group_source_req));
74142 u32 interface;
74143 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74144 }
74145 case MCAST_MSFILTER:
74146 {
74147 - struct compat_group_filter __user *gf32 = (void *)optval;
74148 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74149 struct group_filter __user *kgf;
74150 u32 interface, fmode, numsrc;
74151
74152 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74153 char __user *optval, int __user *optlen,
74154 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74155 {
74156 - struct compat_group_filter __user *gf32 = (void *)optval;
74157 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74158 struct group_filter __user *kgf;
74159 int __user *koptlen;
74160 u32 interface, fmode, numsrc;
74161 diff --git a/net/core/datagram.c b/net/core/datagram.c
74162 index 68bbf9f..5ef0d12 100644
74163 --- a/net/core/datagram.c
74164 +++ b/net/core/datagram.c
74165 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74166 }
74167
74168 kfree_skb(skb);
74169 - atomic_inc(&sk->sk_drops);
74170 + atomic_inc_unchecked(&sk->sk_drops);
74171 sk_mem_reclaim_partial(sk);
74172
74173 return err;
74174 diff --git a/net/core/dev.c b/net/core/dev.c
74175 index c56cacf..b28e35f 100644
74176 --- a/net/core/dev.c
74177 +++ b/net/core/dev.c
74178 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
74179 if (no_module && capable(CAP_NET_ADMIN))
74180 no_module = request_module("netdev-%s", name);
74181 if (no_module && capable(CAP_SYS_MODULE)) {
74182 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74183 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74184 +#else
74185 if (!request_module("%s", name))
74186 pr_err("Loading kernel module for a network device "
74187 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
74188 "instead\n", name);
74189 +#endif
74190 }
74191 }
74192 EXPORT_SYMBOL(dev_load);
74193 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74194 {
74195 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74196 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74197 - atomic_long_inc(&dev->rx_dropped);
74198 + atomic_long_inc_unchecked(&dev->rx_dropped);
74199 kfree_skb(skb);
74200 return NET_RX_DROP;
74201 }
74202 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74203 nf_reset(skb);
74204
74205 if (unlikely(!is_skb_forwardable(dev, skb))) {
74206 - atomic_long_inc(&dev->rx_dropped);
74207 + atomic_long_inc_unchecked(&dev->rx_dropped);
74208 kfree_skb(skb);
74209 return NET_RX_DROP;
74210 }
74211 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74212
74213 struct dev_gso_cb {
74214 void (*destructor)(struct sk_buff *skb);
74215 -};
74216 +} __no_const;
74217
74218 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74219
74220 @@ -2970,7 +2974,7 @@ enqueue:
74221
74222 local_irq_restore(flags);
74223
74224 - atomic_long_inc(&skb->dev->rx_dropped);
74225 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74226 kfree_skb(skb);
74227 return NET_RX_DROP;
74228 }
74229 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
74230 }
74231 EXPORT_SYMBOL(netif_rx_ni);
74232
74233 -static void net_tx_action(struct softirq_action *h)
74234 +static void net_tx_action(void)
74235 {
74236 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74237
74238 @@ -3333,7 +3337,7 @@ ncls:
74239 if (pt_prev) {
74240 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74241 } else {
74242 - atomic_long_inc(&skb->dev->rx_dropped);
74243 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74244 kfree_skb(skb);
74245 /* Jamal, now you will not able to escape explaining
74246 * me how you were going to use this. :-)
74247 @@ -3897,7 +3901,7 @@ void netif_napi_del(struct napi_struct *napi)
74248 }
74249 EXPORT_SYMBOL(netif_napi_del);
74250
74251 -static void net_rx_action(struct softirq_action *h)
74252 +static void net_rx_action(void)
74253 {
74254 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74255 unsigned long time_limit = jiffies + 2;
74256 @@ -5955,7 +5959,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74257 } else {
74258 netdev_stats_to_stats64(storage, &dev->stats);
74259 }
74260 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74261 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74262 return storage;
74263 }
74264 EXPORT_SYMBOL(dev_get_stats);
74265 diff --git a/net/core/flow.c b/net/core/flow.c
74266 index e318c7e..168b1d0 100644
74267 --- a/net/core/flow.c
74268 +++ b/net/core/flow.c
74269 @@ -61,7 +61,7 @@ struct flow_cache {
74270 struct timer_list rnd_timer;
74271 };
74272
74273 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
74274 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74275 EXPORT_SYMBOL(flow_cache_genid);
74276 static struct flow_cache flow_cache_global;
74277 static struct kmem_cache *flow_cachep __read_mostly;
74278 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74279
74280 static int flow_entry_valid(struct flow_cache_entry *fle)
74281 {
74282 - if (atomic_read(&flow_cache_genid) != fle->genid)
74283 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74284 return 0;
74285 if (fle->object && !fle->object->ops->check(fle->object))
74286 return 0;
74287 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74288 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74289 fcp->hash_count++;
74290 }
74291 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74292 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74293 flo = fle->object;
74294 if (!flo)
74295 goto ret_object;
74296 @@ -280,7 +280,7 @@ nocache:
74297 }
74298 flo = resolver(net, key, family, dir, flo, ctx);
74299 if (fle) {
74300 - fle->genid = atomic_read(&flow_cache_genid);
74301 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
74302 if (!IS_ERR(flo))
74303 fle->object = flo;
74304 else
74305 diff --git a/net/core/iovec.c b/net/core/iovec.c
74306 index c40f27e..7f49254 100644
74307 --- a/net/core/iovec.c
74308 +++ b/net/core/iovec.c
74309 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
74310 if (m->msg_namelen) {
74311 if (mode == VERIFY_READ) {
74312 void __user *namep;
74313 - namep = (void __user __force *) m->msg_name;
74314 + namep = (void __force_user *) m->msg_name;
74315 err = move_addr_to_kernel(namep, m->msg_namelen,
74316 address);
74317 if (err < 0)
74318 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
74319 }
74320
74321 size = m->msg_iovlen * sizeof(struct iovec);
74322 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74323 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74324 return -EFAULT;
74325
74326 m->msg_iov = iov;
74327 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74328 index 9083e82..1673203 100644
74329 --- a/net/core/rtnetlink.c
74330 +++ b/net/core/rtnetlink.c
74331 @@ -57,7 +57,7 @@ struct rtnl_link {
74332 rtnl_doit_func doit;
74333 rtnl_dumpit_func dumpit;
74334 rtnl_calcit_func calcit;
74335 -};
74336 +} __no_const;
74337
74338 static DEFINE_MUTEX(rtnl_mutex);
74339 static u16 min_ifinfo_dump_size;
74340 diff --git a/net/core/scm.c b/net/core/scm.c
74341 index ff52ad0..aff1c0f 100644
74342 --- a/net/core/scm.c
74343 +++ b/net/core/scm.c
74344 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
74345 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74346 {
74347 struct cmsghdr __user *cm
74348 - = (__force struct cmsghdr __user *)msg->msg_control;
74349 + = (struct cmsghdr __force_user *)msg->msg_control;
74350 struct cmsghdr cmhdr;
74351 int cmlen = CMSG_LEN(len);
74352 int err;
74353 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74354 err = -EFAULT;
74355 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74356 goto out;
74357 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74358 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74359 goto out;
74360 cmlen = CMSG_SPACE(len);
74361 if (msg->msg_controllen < cmlen)
74362 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
74363 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74364 {
74365 struct cmsghdr __user *cm
74366 - = (__force struct cmsghdr __user*)msg->msg_control;
74367 + = (struct cmsghdr __force_user *)msg->msg_control;
74368
74369 int fdmax = 0;
74370 int fdnum = scm->fp->count;
74371 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74372 if (fdnum < fdmax)
74373 fdmax = fdnum;
74374
74375 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74376 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74377 i++, cmfptr++)
74378 {
74379 int new_fd;
74380 diff --git a/net/core/sock.c b/net/core/sock.c
74381 index b23f174..b9a0d26 100644
74382 --- a/net/core/sock.c
74383 +++ b/net/core/sock.c
74384 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74385 struct sk_buff_head *list = &sk->sk_receive_queue;
74386
74387 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74388 - atomic_inc(&sk->sk_drops);
74389 + atomic_inc_unchecked(&sk->sk_drops);
74390 trace_sock_rcvqueue_full(sk, skb);
74391 return -ENOMEM;
74392 }
74393 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74394 return err;
74395
74396 if (!sk_rmem_schedule(sk, skb->truesize)) {
74397 - atomic_inc(&sk->sk_drops);
74398 + atomic_inc_unchecked(&sk->sk_drops);
74399 return -ENOBUFS;
74400 }
74401
74402 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74403 skb_dst_force(skb);
74404
74405 spin_lock_irqsave(&list->lock, flags);
74406 - skb->dropcount = atomic_read(&sk->sk_drops);
74407 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74408 __skb_queue_tail(list, skb);
74409 spin_unlock_irqrestore(&list->lock, flags);
74410
74411 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74412 skb->dev = NULL;
74413
74414 if (sk_rcvqueues_full(sk, skb)) {
74415 - atomic_inc(&sk->sk_drops);
74416 + atomic_inc_unchecked(&sk->sk_drops);
74417 goto discard_and_relse;
74418 }
74419 if (nested)
74420 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74421 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74422 } else if (sk_add_backlog(sk, skb)) {
74423 bh_unlock_sock(sk);
74424 - atomic_inc(&sk->sk_drops);
74425 + atomic_inc_unchecked(&sk->sk_drops);
74426 goto discard_and_relse;
74427 }
74428
74429 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74430 if (len > sizeof(peercred))
74431 len = sizeof(peercred);
74432 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74433 - if (copy_to_user(optval, &peercred, len))
74434 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74435 return -EFAULT;
74436 goto lenout;
74437 }
74438 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74439 return -ENOTCONN;
74440 if (lv < len)
74441 return -EINVAL;
74442 - if (copy_to_user(optval, address, len))
74443 + if (len > sizeof(address) || copy_to_user(optval, address, len))
74444 return -EFAULT;
74445 goto lenout;
74446 }
74447 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74448
74449 if (len > lv)
74450 len = lv;
74451 - if (copy_to_user(optval, &v, len))
74452 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
74453 return -EFAULT;
74454 lenout:
74455 if (put_user(len, optlen))
74456 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74457 */
74458 smp_wmb();
74459 atomic_set(&sk->sk_refcnt, 1);
74460 - atomic_set(&sk->sk_drops, 0);
74461 + atomic_set_unchecked(&sk->sk_drops, 0);
74462 }
74463 EXPORT_SYMBOL(sock_init_data);
74464
74465 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74466 index 02e75d1..9a57a7c 100644
74467 --- a/net/decnet/sysctl_net_decnet.c
74468 +++ b/net/decnet/sysctl_net_decnet.c
74469 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74470
74471 if (len > *lenp) len = *lenp;
74472
74473 - if (copy_to_user(buffer, addr, len))
74474 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
74475 return -EFAULT;
74476
74477 *lenp = len;
74478 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74479
74480 if (len > *lenp) len = *lenp;
74481
74482 - if (copy_to_user(buffer, devname, len))
74483 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
74484 return -EFAULT;
74485
74486 *lenp = len;
74487 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74488 index 39a2d29..f39c0fe 100644
74489 --- a/net/econet/Kconfig
74490 +++ b/net/econet/Kconfig
74491 @@ -4,7 +4,7 @@
74492
74493 config ECONET
74494 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74495 - depends on EXPERIMENTAL && INET
74496 + depends on EXPERIMENTAL && INET && BROKEN
74497 ---help---
74498 Econet is a fairly old and slow networking protocol mainly used by
74499 Acorn computers to access file and print servers. It uses native
74500 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74501 index 92fc5f6..b790d91 100644
74502 --- a/net/ipv4/fib_frontend.c
74503 +++ b/net/ipv4/fib_frontend.c
74504 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74505 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74506 fib_sync_up(dev);
74507 #endif
74508 - atomic_inc(&net->ipv4.dev_addr_genid);
74509 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74510 rt_cache_flush(dev_net(dev), -1);
74511 break;
74512 case NETDEV_DOWN:
74513 fib_del_ifaddr(ifa, NULL);
74514 - atomic_inc(&net->ipv4.dev_addr_genid);
74515 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74516 if (ifa->ifa_dev->ifa_list == NULL) {
74517 /* Last address was deleted from this interface.
74518 * Disable IP.
74519 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74520 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74521 fib_sync_up(dev);
74522 #endif
74523 - atomic_inc(&net->ipv4.dev_addr_genid);
74524 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74525 rt_cache_flush(dev_net(dev), -1);
74526 break;
74527 case NETDEV_DOWN:
74528 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74529 index 80106d8..232e898 100644
74530 --- a/net/ipv4/fib_semantics.c
74531 +++ b/net/ipv4/fib_semantics.c
74532 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74533 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74534 nh->nh_gw,
74535 nh->nh_parent->fib_scope);
74536 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74537 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74538
74539 return nh->nh_saddr;
74540 }
74541 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
74542 index ccee270..db23c3c 100644
74543 --- a/net/ipv4/inet_diag.c
74544 +++ b/net/ipv4/inet_diag.c
74545 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
74546 r->idiag_retrans = 0;
74547
74548 r->id.idiag_if = sk->sk_bound_dev_if;
74549 +
74550 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74551 + r->id.idiag_cookie[0] = 0;
74552 + r->id.idiag_cookie[1] = 0;
74553 +#else
74554 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
74555 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74556 +#endif
74557
74558 r->id.idiag_sport = inet->inet_sport;
74559 r->id.idiag_dport = inet->inet_dport;
74560 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
74561 r->idiag_family = tw->tw_family;
74562 r->idiag_retrans = 0;
74563 r->id.idiag_if = tw->tw_bound_dev_if;
74564 +
74565 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74566 + r->id.idiag_cookie[0] = 0;
74567 + r->id.idiag_cookie[1] = 0;
74568 +#else
74569 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
74570 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
74571 +#endif
74572 +
74573 r->id.idiag_sport = tw->tw_sport;
74574 r->id.idiag_dport = tw->tw_dport;
74575 r->id.idiag_src[0] = tw->tw_rcv_saddr;
74576 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
74577 if (sk == NULL)
74578 goto unlock;
74579
74580 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74581 err = -ESTALE;
74582 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
74583 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
74584 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
74585 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
74586 goto out;
74587 +#endif
74588
74589 err = -ENOMEM;
74590 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
74591 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
74592 r->idiag_retrans = req->retrans;
74593
74594 r->id.idiag_if = sk->sk_bound_dev_if;
74595 +
74596 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74597 + r->id.idiag_cookie[0] = 0;
74598 + r->id.idiag_cookie[1] = 0;
74599 +#else
74600 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
74601 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
74602 +#endif
74603
74604 tmo = req->expires - jiffies;
74605 if (tmo < 0)
74606 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74607 index 984ec65..97ac518 100644
74608 --- a/net/ipv4/inet_hashtables.c
74609 +++ b/net/ipv4/inet_hashtables.c
74610 @@ -18,12 +18,15 @@
74611 #include <linux/sched.h>
74612 #include <linux/slab.h>
74613 #include <linux/wait.h>
74614 +#include <linux/security.h>
74615
74616 #include <net/inet_connection_sock.h>
74617 #include <net/inet_hashtables.h>
74618 #include <net/secure_seq.h>
74619 #include <net/ip.h>
74620
74621 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74622 +
74623 /*
74624 * Allocate and initialize a new local port bind bucket.
74625 * The bindhash mutex for snum's hash chain must be held here.
74626 @@ -530,6 +533,8 @@ ok:
74627 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74628 spin_unlock(&head->lock);
74629
74630 + gr_update_task_in_ip_table(current, inet_sk(sk));
74631 +
74632 if (tw) {
74633 inet_twsk_deschedule(tw, death_row);
74634 while (twrefcnt) {
74635 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74636 index 86f13c67..59a35b5 100644
74637 --- a/net/ipv4/inetpeer.c
74638 +++ b/net/ipv4/inetpeer.c
74639 @@ -436,8 +436,8 @@ relookup:
74640 if (p) {
74641 p->daddr = *daddr;
74642 atomic_set(&p->refcnt, 1);
74643 - atomic_set(&p->rid, 0);
74644 - atomic_set(&p->ip_id_count,
74645 + atomic_set_unchecked(&p->rid, 0);
74646 + atomic_set_unchecked(&p->ip_id_count,
74647 (daddr->family == AF_INET) ?
74648 secure_ip_id(daddr->addr.a4) :
74649 secure_ipv6_id(daddr->addr.a6));
74650 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74651 index fdaabf2..0ec3205 100644
74652 --- a/net/ipv4/ip_fragment.c
74653 +++ b/net/ipv4/ip_fragment.c
74654 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74655 return 0;
74656
74657 start = qp->rid;
74658 - end = atomic_inc_return(&peer->rid);
74659 + end = atomic_inc_return_unchecked(&peer->rid);
74660 qp->rid = end;
74661
74662 rc = qp->q.fragments && (end - start) > max;
74663 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74664 index 09ff51b..d3968eb 100644
74665 --- a/net/ipv4/ip_sockglue.c
74666 +++ b/net/ipv4/ip_sockglue.c
74667 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74668 len = min_t(unsigned int, len, opt->optlen);
74669 if (put_user(len, optlen))
74670 return -EFAULT;
74671 - if (copy_to_user(optval, opt->__data, len))
74672 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74673 + copy_to_user(optval, opt->__data, len))
74674 return -EFAULT;
74675 return 0;
74676 }
74677 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74678 if (sk->sk_type != SOCK_STREAM)
74679 return -ENOPROTOOPT;
74680
74681 - msg.msg_control = optval;
74682 + msg.msg_control = (void __force_kernel *)optval;
74683 msg.msg_controllen = len;
74684 msg.msg_flags = flags;
74685
74686 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74687 index 99ec116..c5628fe 100644
74688 --- a/net/ipv4/ipconfig.c
74689 +++ b/net/ipv4/ipconfig.c
74690 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74691
74692 mm_segment_t oldfs = get_fs();
74693 set_fs(get_ds());
74694 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74695 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74696 set_fs(oldfs);
74697 return res;
74698 }
74699 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74700
74701 mm_segment_t oldfs = get_fs();
74702 set_fs(get_ds());
74703 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74704 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74705 set_fs(oldfs);
74706 return res;
74707 }
74708 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74709
74710 mm_segment_t oldfs = get_fs();
74711 set_fs(get_ds());
74712 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74713 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74714 set_fs(oldfs);
74715 return res;
74716 }
74717 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74718 index 2133c30..5c4b40b 100644
74719 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
74720 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74721 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
74722
74723 *len = 0;
74724
74725 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
74726 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
74727 if (*octets == NULL)
74728 return 0;
74729
74730 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74731 index 43d4c3b..1914409 100644
74732 --- a/net/ipv4/ping.c
74733 +++ b/net/ipv4/ping.c
74734 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74735 sk_rmem_alloc_get(sp),
74736 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74737 atomic_read(&sp->sk_refcnt), sp,
74738 - atomic_read(&sp->sk_drops), len);
74739 + atomic_read_unchecked(&sp->sk_drops), len);
74740 }
74741
74742 static int ping_seq_show(struct seq_file *seq, void *v)
74743 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74744 index 007e2eb..85a18a0 100644
74745 --- a/net/ipv4/raw.c
74746 +++ b/net/ipv4/raw.c
74747 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74748 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74749 {
74750 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74751 - atomic_inc(&sk->sk_drops);
74752 + atomic_inc_unchecked(&sk->sk_drops);
74753 kfree_skb(skb);
74754 return NET_RX_DROP;
74755 }
74756 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
74757
74758 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74759 {
74760 + struct icmp_filter filter;
74761 +
74762 if (optlen > sizeof(struct icmp_filter))
74763 optlen = sizeof(struct icmp_filter);
74764 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74765 + if (copy_from_user(&filter, optval, optlen))
74766 return -EFAULT;
74767 + raw_sk(sk)->filter = filter;
74768 return 0;
74769 }
74770
74771 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74772 {
74773 int len, ret = -EFAULT;
74774 + struct icmp_filter filter;
74775
74776 if (get_user(len, optlen))
74777 goto out;
74778 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74779 if (len > sizeof(struct icmp_filter))
74780 len = sizeof(struct icmp_filter);
74781 ret = -EFAULT;
74782 - if (put_user(len, optlen) ||
74783 - copy_to_user(optval, &raw_sk(sk)->filter, len))
74784 + filter = raw_sk(sk)->filter;
74785 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74786 goto out;
74787 ret = 0;
74788 out: return ret;
74789 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74790 sk_wmem_alloc_get(sp),
74791 sk_rmem_alloc_get(sp),
74792 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74793 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74794 + atomic_read(&sp->sk_refcnt),
74795 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74796 + NULL,
74797 +#else
74798 + sp,
74799 +#endif
74800 + atomic_read_unchecked(&sp->sk_drops));
74801 }
74802
74803 static int raw_seq_show(struct seq_file *seq, void *v)
74804 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74805 index 94cdbc5..0cb0063 100644
74806 --- a/net/ipv4/route.c
74807 +++ b/net/ipv4/route.c
74808 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74809
74810 static inline int rt_genid(struct net *net)
74811 {
74812 - return atomic_read(&net->ipv4.rt_genid);
74813 + return atomic_read_unchecked(&net->ipv4.rt_genid);
74814 }
74815
74816 #ifdef CONFIG_PROC_FS
74817 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
74818 unsigned char shuffle;
74819
74820 get_random_bytes(&shuffle, sizeof(shuffle));
74821 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74822 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74823 redirect_genid++;
74824 }
74825
74826 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
74827 error = rt->dst.error;
74828 if (peer) {
74829 inet_peer_refcheck(rt->peer);
74830 - id = atomic_read(&peer->ip_id_count) & 0xffff;
74831 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74832 if (peer->tcp_ts_stamp) {
74833 ts = peer->tcp_ts;
74834 tsage = get_seconds() - peer->tcp_ts_stamp;
74835 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74836 index eb90aa8..22bf114 100644
74837 --- a/net/ipv4/tcp_ipv4.c
74838 +++ b/net/ipv4/tcp_ipv4.c
74839 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
74840 int sysctl_tcp_low_latency __read_mostly;
74841 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74842
74843 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74844 +extern int grsec_enable_blackhole;
74845 +#endif
74846
74847 #ifdef CONFIG_TCP_MD5SIG
74848 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
74849 @@ -1632,6 +1635,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74850 return 0;
74851
74852 reset:
74853 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74854 + if (!grsec_enable_blackhole)
74855 +#endif
74856 tcp_v4_send_reset(rsk, skb);
74857 discard:
74858 kfree_skb(skb);
74859 @@ -1694,12 +1700,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74860 TCP_SKB_CB(skb)->sacked = 0;
74861
74862 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74863 - if (!sk)
74864 + if (!sk) {
74865 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74866 + ret = 1;
74867 +#endif
74868 goto no_tcp_socket;
74869 -
74870 + }
74871 process:
74872 - if (sk->sk_state == TCP_TIME_WAIT)
74873 + if (sk->sk_state == TCP_TIME_WAIT) {
74874 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74875 + ret = 2;
74876 +#endif
74877 goto do_time_wait;
74878 + }
74879
74880 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
74881 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
74882 @@ -1749,6 +1762,10 @@ no_tcp_socket:
74883 bad_packet:
74884 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
74885 } else {
74886 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74887 + if (!grsec_enable_blackhole || (ret == 1 &&
74888 + (skb->dev->flags & IFF_LOOPBACK)))
74889 +#endif
74890 tcp_v4_send_reset(NULL, skb);
74891 }
74892
74893 @@ -2409,7 +2426,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
74894 0, /* non standard timer */
74895 0, /* open_requests have no inode */
74896 atomic_read(&sk->sk_refcnt),
74897 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74898 + NULL,
74899 +#else
74900 req,
74901 +#endif
74902 len);
74903 }
74904
74905 @@ -2459,7 +2480,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
74906 sock_i_uid(sk),
74907 icsk->icsk_probes_out,
74908 sock_i_ino(sk),
74909 - atomic_read(&sk->sk_refcnt), sk,
74910 + atomic_read(&sk->sk_refcnt),
74911 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74912 + NULL,
74913 +#else
74914 + sk,
74915 +#endif
74916 jiffies_to_clock_t(icsk->icsk_rto),
74917 jiffies_to_clock_t(icsk->icsk_ack.ato),
74918 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
74919 @@ -2487,7 +2513,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
74920 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
74921 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
74922 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
74923 - atomic_read(&tw->tw_refcnt), tw, len);
74924 + atomic_read(&tw->tw_refcnt),
74925 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74926 + NULL,
74927 +#else
74928 + tw,
74929 +#endif
74930 + len);
74931 }
74932
74933 #define TMPSZ 150
74934 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
74935 index 66363b6..b0654a3 100644
74936 --- a/net/ipv4/tcp_minisocks.c
74937 +++ b/net/ipv4/tcp_minisocks.c
74938 @@ -27,6 +27,10 @@
74939 #include <net/inet_common.h>
74940 #include <net/xfrm.h>
74941
74942 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74943 +extern int grsec_enable_blackhole;
74944 +#endif
74945 +
74946 int sysctl_tcp_syncookies __read_mostly = 1;
74947 EXPORT_SYMBOL(sysctl_tcp_syncookies);
74948
74949 @@ -751,6 +755,10 @@ listen_overflow:
74950
74951 embryonic_reset:
74952 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
74953 +
74954 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74955 + if (!grsec_enable_blackhole)
74956 +#endif
74957 if (!(flg & TCP_FLAG_RST))
74958 req->rsk_ops->send_reset(sk, skb);
74959
74960 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
74961 index 85ee7eb..53277ab 100644
74962 --- a/net/ipv4/tcp_probe.c
74963 +++ b/net/ipv4/tcp_probe.c
74964 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
74965 if (cnt + width >= len)
74966 break;
74967
74968 - if (copy_to_user(buf + cnt, tbuf, width))
74969 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
74970 return -EFAULT;
74971 cnt += width;
74972 }
74973 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
74974 index 2e0f0af..e2948bf 100644
74975 --- a/net/ipv4/tcp_timer.c
74976 +++ b/net/ipv4/tcp_timer.c
74977 @@ -22,6 +22,10 @@
74978 #include <linux/gfp.h>
74979 #include <net/tcp.h>
74980
74981 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74982 +extern int grsec_lastack_retries;
74983 +#endif
74984 +
74985 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
74986 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
74987 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
74988 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
74989 }
74990 }
74991
74992 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74993 + if ((sk->sk_state == TCP_LAST_ACK) &&
74994 + (grsec_lastack_retries > 0) &&
74995 + (grsec_lastack_retries < retry_until))
74996 + retry_until = grsec_lastack_retries;
74997 +#endif
74998 +
74999 if (retransmits_timed_out(sk, retry_until,
75000 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75001 /* Has it gone just too far? */
75002 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75003 index 5a65eea..bd913a1 100644
75004 --- a/net/ipv4/udp.c
75005 +++ b/net/ipv4/udp.c
75006 @@ -86,6 +86,7 @@
75007 #include <linux/types.h>
75008 #include <linux/fcntl.h>
75009 #include <linux/module.h>
75010 +#include <linux/security.h>
75011 #include <linux/socket.h>
75012 #include <linux/sockios.h>
75013 #include <linux/igmp.h>
75014 @@ -108,6 +109,10 @@
75015 #include <trace/events/udp.h>
75016 #include "udp_impl.h"
75017
75018 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75019 +extern int grsec_enable_blackhole;
75020 +#endif
75021 +
75022 struct udp_table udp_table __read_mostly;
75023 EXPORT_SYMBOL(udp_table);
75024
75025 @@ -565,6 +570,9 @@ found:
75026 return s;
75027 }
75028
75029 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75030 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75031 +
75032 /*
75033 * This routine is called by the ICMP module when it gets some
75034 * sort of error condition. If err < 0 then the socket should
75035 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75036 dport = usin->sin_port;
75037 if (dport == 0)
75038 return -EINVAL;
75039 +
75040 + err = gr_search_udp_sendmsg(sk, usin);
75041 + if (err)
75042 + return err;
75043 } else {
75044 if (sk->sk_state != TCP_ESTABLISHED)
75045 return -EDESTADDRREQ;
75046 +
75047 + err = gr_search_udp_sendmsg(sk, NULL);
75048 + if (err)
75049 + return err;
75050 +
75051 daddr = inet->inet_daddr;
75052 dport = inet->inet_dport;
75053 /* Open fast path for connected socket.
75054 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
75055 udp_lib_checksum_complete(skb)) {
75056 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75057 IS_UDPLITE(sk));
75058 - atomic_inc(&sk->sk_drops);
75059 + atomic_inc_unchecked(&sk->sk_drops);
75060 __skb_unlink(skb, rcvq);
75061 __skb_queue_tail(&list_kill, skb);
75062 }
75063 @@ -1185,6 +1202,10 @@ try_again:
75064 if (!skb)
75065 goto out;
75066
75067 + err = gr_search_udp_recvmsg(sk, skb);
75068 + if (err)
75069 + goto out_free;
75070 +
75071 ulen = skb->len - sizeof(struct udphdr);
75072 copied = len;
75073 if (copied > ulen)
75074 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75075
75076 drop:
75077 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75078 - atomic_inc(&sk->sk_drops);
75079 + atomic_inc_unchecked(&sk->sk_drops);
75080 kfree_skb(skb);
75081 return -1;
75082 }
75083 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75084 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75085
75086 if (!skb1) {
75087 - atomic_inc(&sk->sk_drops);
75088 + atomic_inc_unchecked(&sk->sk_drops);
75089 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75090 IS_UDPLITE(sk));
75091 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75092 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75093 goto csum_error;
75094
75095 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75096 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75097 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75098 +#endif
75099 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75100
75101 /*
75102 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75103 sk_wmem_alloc_get(sp),
75104 sk_rmem_alloc_get(sp),
75105 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75106 - atomic_read(&sp->sk_refcnt), sp,
75107 - atomic_read(&sp->sk_drops), len);
75108 + atomic_read(&sp->sk_refcnt),
75109 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75110 + NULL,
75111 +#else
75112 + sp,
75113 +#endif
75114 + atomic_read_unchecked(&sp->sk_drops), len);
75115 }
75116
75117 int udp4_seq_show(struct seq_file *seq, void *v)
75118 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75119 index 836c4ea..cbb74dc 100644
75120 --- a/net/ipv6/addrconf.c
75121 +++ b/net/ipv6/addrconf.c
75122 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75123 p.iph.ihl = 5;
75124 p.iph.protocol = IPPROTO_IPV6;
75125 p.iph.ttl = 64;
75126 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75127 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75128
75129 if (ops->ndo_do_ioctl) {
75130 mm_segment_t oldfs = get_fs();
75131 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75132 index 1567fb1..29af910 100644
75133 --- a/net/ipv6/inet6_connection_sock.c
75134 +++ b/net/ipv6/inet6_connection_sock.c
75135 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75136 #ifdef CONFIG_XFRM
75137 {
75138 struct rt6_info *rt = (struct rt6_info *)dst;
75139 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75140 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75141 }
75142 #endif
75143 }
75144 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75145 #ifdef CONFIG_XFRM
75146 if (dst) {
75147 struct rt6_info *rt = (struct rt6_info *)dst;
75148 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75149 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75150 __sk_dst_reset(sk);
75151 dst = NULL;
75152 }
75153 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75154 index 26cb08c..8af9877 100644
75155 --- a/net/ipv6/ipv6_sockglue.c
75156 +++ b/net/ipv6/ipv6_sockglue.c
75157 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75158 if (sk->sk_type != SOCK_STREAM)
75159 return -ENOPROTOOPT;
75160
75161 - msg.msg_control = optval;
75162 + msg.msg_control = (void __force_kernel *)optval;
75163 msg.msg_controllen = len;
75164 msg.msg_flags = flags;
75165
75166 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75167 index 361ebf3..d5628fb 100644
75168 --- a/net/ipv6/raw.c
75169 +++ b/net/ipv6/raw.c
75170 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75171 {
75172 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75173 skb_checksum_complete(skb)) {
75174 - atomic_inc(&sk->sk_drops);
75175 + atomic_inc_unchecked(&sk->sk_drops);
75176 kfree_skb(skb);
75177 return NET_RX_DROP;
75178 }
75179 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75180 struct raw6_sock *rp = raw6_sk(sk);
75181
75182 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75183 - atomic_inc(&sk->sk_drops);
75184 + atomic_inc_unchecked(&sk->sk_drops);
75185 kfree_skb(skb);
75186 return NET_RX_DROP;
75187 }
75188 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75189
75190 if (inet->hdrincl) {
75191 if (skb_checksum_complete(skb)) {
75192 - atomic_inc(&sk->sk_drops);
75193 + atomic_inc_unchecked(&sk->sk_drops);
75194 kfree_skb(skb);
75195 return NET_RX_DROP;
75196 }
75197 @@ -601,7 +601,7 @@ out:
75198 return err;
75199 }
75200
75201 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75202 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75203 struct flowi6 *fl6, struct dst_entry **dstp,
75204 unsigned int flags)
75205 {
75206 @@ -909,12 +909,15 @@ do_confirm:
75207 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75208 char __user *optval, int optlen)
75209 {
75210 + struct icmp6_filter filter;
75211 +
75212 switch (optname) {
75213 case ICMPV6_FILTER:
75214 if (optlen > sizeof(struct icmp6_filter))
75215 optlen = sizeof(struct icmp6_filter);
75216 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75217 + if (copy_from_user(&filter, optval, optlen))
75218 return -EFAULT;
75219 + raw6_sk(sk)->filter = filter;
75220 return 0;
75221 default:
75222 return -ENOPROTOOPT;
75223 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75224 char __user *optval, int __user *optlen)
75225 {
75226 int len;
75227 + struct icmp6_filter filter;
75228
75229 switch (optname) {
75230 case ICMPV6_FILTER:
75231 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75232 len = sizeof(struct icmp6_filter);
75233 if (put_user(len, optlen))
75234 return -EFAULT;
75235 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75236 + filter = raw6_sk(sk)->filter;
75237 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75238 return -EFAULT;
75239 return 0;
75240 default:
75241 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75242 0, 0L, 0,
75243 sock_i_uid(sp), 0,
75244 sock_i_ino(sp),
75245 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75246 + atomic_read(&sp->sk_refcnt),
75247 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75248 + NULL,
75249 +#else
75250 + sp,
75251 +#endif
75252 + atomic_read_unchecked(&sp->sk_drops));
75253 }
75254
75255 static int raw6_seq_show(struct seq_file *seq, void *v)
75256 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75257 index b859e4a..f9d1589 100644
75258 --- a/net/ipv6/tcp_ipv6.c
75259 +++ b/net/ipv6/tcp_ipv6.c
75260 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75261 }
75262 #endif
75263
75264 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75265 +extern int grsec_enable_blackhole;
75266 +#endif
75267 +
75268 static void tcp_v6_hash(struct sock *sk)
75269 {
75270 if (sk->sk_state != TCP_CLOSE) {
75271 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75272 return 0;
75273
75274 reset:
75275 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75276 + if (!grsec_enable_blackhole)
75277 +#endif
75278 tcp_v6_send_reset(sk, skb);
75279 discard:
75280 if (opt_skb)
75281 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75282 TCP_SKB_CB(skb)->sacked = 0;
75283
75284 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75285 - if (!sk)
75286 + if (!sk) {
75287 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75288 + ret = 1;
75289 +#endif
75290 goto no_tcp_socket;
75291 + }
75292
75293 process:
75294 - if (sk->sk_state == TCP_TIME_WAIT)
75295 + if (sk->sk_state == TCP_TIME_WAIT) {
75296 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75297 + ret = 2;
75298 +#endif
75299 goto do_time_wait;
75300 + }
75301
75302 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75303 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75304 @@ -1783,6 +1798,10 @@ no_tcp_socket:
75305 bad_packet:
75306 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75307 } else {
75308 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75309 + if (!grsec_enable_blackhole || (ret == 1 &&
75310 + (skb->dev->flags & IFF_LOOPBACK)))
75311 +#endif
75312 tcp_v6_send_reset(NULL, skb);
75313 }
75314
75315 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
75316 uid,
75317 0, /* non standard timer */
75318 0, /* open_requests have no inode */
75319 - 0, req);
75320 + 0,
75321 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75322 + NULL
75323 +#else
75324 + req
75325 +#endif
75326 + );
75327 }
75328
75329 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75330 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75331 sock_i_uid(sp),
75332 icsk->icsk_probes_out,
75333 sock_i_ino(sp),
75334 - atomic_read(&sp->sk_refcnt), sp,
75335 + atomic_read(&sp->sk_refcnt),
75336 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75337 + NULL,
75338 +#else
75339 + sp,
75340 +#endif
75341 jiffies_to_clock_t(icsk->icsk_rto),
75342 jiffies_to_clock_t(icsk->icsk_ack.ato),
75343 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75344 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75345 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75346 tw->tw_substate, 0, 0,
75347 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75348 - atomic_read(&tw->tw_refcnt), tw);
75349 + atomic_read(&tw->tw_refcnt),
75350 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75351 + NULL
75352 +#else
75353 + tw
75354 +#endif
75355 + );
75356 }
75357
75358 static int tcp6_seq_show(struct seq_file *seq, void *v)
75359 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75360 index 8c25419..47a51ae 100644
75361 --- a/net/ipv6/udp.c
75362 +++ b/net/ipv6/udp.c
75363 @@ -50,6 +50,10 @@
75364 #include <linux/seq_file.h>
75365 #include "udp_impl.h"
75366
75367 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75368 +extern int grsec_enable_blackhole;
75369 +#endif
75370 +
75371 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75372 {
75373 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75374 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75375
75376 return 0;
75377 drop:
75378 - atomic_inc(&sk->sk_drops);
75379 + atomic_inc_unchecked(&sk->sk_drops);
75380 drop_no_sk_drops_inc:
75381 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75382 kfree_skb(skb);
75383 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75384 continue;
75385 }
75386 drop:
75387 - atomic_inc(&sk->sk_drops);
75388 + atomic_inc_unchecked(&sk->sk_drops);
75389 UDP6_INC_STATS_BH(sock_net(sk),
75390 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75391 UDP6_INC_STATS_BH(sock_net(sk),
75392 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75393 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75394 proto == IPPROTO_UDPLITE);
75395
75396 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75397 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75398 +#endif
75399 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75400
75401 kfree_skb(skb);
75402 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75403 if (!sock_owned_by_user(sk))
75404 udpv6_queue_rcv_skb(sk, skb);
75405 else if (sk_add_backlog(sk, skb)) {
75406 - atomic_inc(&sk->sk_drops);
75407 + atomic_inc_unchecked(&sk->sk_drops);
75408 bh_unlock_sock(sk);
75409 sock_put(sk);
75410 goto discard;
75411 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75412 0, 0L, 0,
75413 sock_i_uid(sp), 0,
75414 sock_i_ino(sp),
75415 - atomic_read(&sp->sk_refcnt), sp,
75416 - atomic_read(&sp->sk_drops));
75417 + atomic_read(&sp->sk_refcnt),
75418 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75419 + NULL,
75420 +#else
75421 + sp,
75422 +#endif
75423 + atomic_read_unchecked(&sp->sk_drops));
75424 }
75425
75426 int udp6_seq_show(struct seq_file *seq, void *v)
75427 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75428 index 253695d..9481ce8 100644
75429 --- a/net/irda/ircomm/ircomm_tty.c
75430 +++ b/net/irda/ircomm/ircomm_tty.c
75431 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75432 add_wait_queue(&self->open_wait, &wait);
75433
75434 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75435 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75436 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75437
75438 /* As far as I can see, we protect open_count - Jean II */
75439 spin_lock_irqsave(&self->spinlock, flags);
75440 if (!tty_hung_up_p(filp)) {
75441 extra_count = 1;
75442 - self->open_count--;
75443 + local_dec(&self->open_count);
75444 }
75445 spin_unlock_irqrestore(&self->spinlock, flags);
75446 - self->blocked_open++;
75447 + local_inc(&self->blocked_open);
75448
75449 while (1) {
75450 if (tty->termios->c_cflag & CBAUD) {
75451 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75452 }
75453
75454 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75455 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75456 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75457
75458 schedule();
75459 }
75460 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75461 if (extra_count) {
75462 /* ++ is not atomic, so this should be protected - Jean II */
75463 spin_lock_irqsave(&self->spinlock, flags);
75464 - self->open_count++;
75465 + local_inc(&self->open_count);
75466 spin_unlock_irqrestore(&self->spinlock, flags);
75467 }
75468 - self->blocked_open--;
75469 + local_dec(&self->blocked_open);
75470
75471 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75472 - __FILE__,__LINE__, tty->driver->name, self->open_count);
75473 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75474
75475 if (!retval)
75476 self->flags |= ASYNC_NORMAL_ACTIVE;
75477 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75478 }
75479 /* ++ is not atomic, so this should be protected - Jean II */
75480 spin_lock_irqsave(&self->spinlock, flags);
75481 - self->open_count++;
75482 + local_inc(&self->open_count);
75483
75484 tty->driver_data = self;
75485 self->tty = tty;
75486 spin_unlock_irqrestore(&self->spinlock, flags);
75487
75488 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75489 - self->line, self->open_count);
75490 + self->line, local_read(&self->open_count));
75491
75492 /* Not really used by us, but lets do it anyway */
75493 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75494 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75495 return;
75496 }
75497
75498 - if ((tty->count == 1) && (self->open_count != 1)) {
75499 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75500 /*
75501 * Uh, oh. tty->count is 1, which means that the tty
75502 * structure will be freed. state->count should always
75503 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75504 */
75505 IRDA_DEBUG(0, "%s(), bad serial port count; "
75506 "tty->count is 1, state->count is %d\n", __func__ ,
75507 - self->open_count);
75508 - self->open_count = 1;
75509 + local_read(&self->open_count));
75510 + local_set(&self->open_count, 1);
75511 }
75512
75513 - if (--self->open_count < 0) {
75514 + if (local_dec_return(&self->open_count) < 0) {
75515 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75516 - __func__, self->line, self->open_count);
75517 - self->open_count = 0;
75518 + __func__, self->line, local_read(&self->open_count));
75519 + local_set(&self->open_count, 0);
75520 }
75521 - if (self->open_count) {
75522 + if (local_read(&self->open_count)) {
75523 spin_unlock_irqrestore(&self->spinlock, flags);
75524
75525 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75526 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75527 tty->closing = 0;
75528 self->tty = NULL;
75529
75530 - if (self->blocked_open) {
75531 + if (local_read(&self->blocked_open)) {
75532 if (self->close_delay)
75533 schedule_timeout_interruptible(self->close_delay);
75534 wake_up_interruptible(&self->open_wait);
75535 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75536 spin_lock_irqsave(&self->spinlock, flags);
75537 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75538 self->tty = NULL;
75539 - self->open_count = 0;
75540 + local_set(&self->open_count, 0);
75541 spin_unlock_irqrestore(&self->spinlock, flags);
75542
75543 wake_up_interruptible(&self->open_wait);
75544 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75545 seq_putc(m, '\n');
75546
75547 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75548 - seq_printf(m, "Open count: %d\n", self->open_count);
75549 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75550 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75551 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75552
75553 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75554 index 274d150..656a144 100644
75555 --- a/net/iucv/af_iucv.c
75556 +++ b/net/iucv/af_iucv.c
75557 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
75558
75559 write_lock_bh(&iucv_sk_list.lock);
75560
75561 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75562 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75563 while (__iucv_get_sock_by_name(name)) {
75564 sprintf(name, "%08x",
75565 - atomic_inc_return(&iucv_sk_list.autobind_name));
75566 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75567 }
75568
75569 write_unlock_bh(&iucv_sk_list.lock);
75570 diff --git a/net/key/af_key.c b/net/key/af_key.c
75571 index 1e733e9..3d73c9f 100644
75572 --- a/net/key/af_key.c
75573 +++ b/net/key/af_key.c
75574 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75575 static u32 get_acqseq(void)
75576 {
75577 u32 res;
75578 - static atomic_t acqseq;
75579 + static atomic_unchecked_t acqseq;
75580
75581 do {
75582 - res = atomic_inc_return(&acqseq);
75583 + res = atomic_inc_return_unchecked(&acqseq);
75584 } while (!res);
75585 return res;
75586 }
75587 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75588 index 73495f1..ad51356 100644
75589 --- a/net/mac80211/ieee80211_i.h
75590 +++ b/net/mac80211/ieee80211_i.h
75591 @@ -27,6 +27,7 @@
75592 #include <net/ieee80211_radiotap.h>
75593 #include <net/cfg80211.h>
75594 #include <net/mac80211.h>
75595 +#include <asm/local.h>
75596 #include "key.h"
75597 #include "sta_info.h"
75598
75599 @@ -764,7 +765,7 @@ struct ieee80211_local {
75600 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75601 spinlock_t queue_stop_reason_lock;
75602
75603 - int open_count;
75604 + local_t open_count;
75605 int monitors, cooked_mntrs;
75606 /* number of interfaces with corresponding FIF_ flags */
75607 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75608 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75609 index 30d7355..e260095 100644
75610 --- a/net/mac80211/iface.c
75611 +++ b/net/mac80211/iface.c
75612 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75613 break;
75614 }
75615
75616 - if (local->open_count == 0) {
75617 + if (local_read(&local->open_count) == 0) {
75618 res = drv_start(local);
75619 if (res)
75620 goto err_del_bss;
75621 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75622 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75623
75624 if (!is_valid_ether_addr(dev->dev_addr)) {
75625 - if (!local->open_count)
75626 + if (!local_read(&local->open_count))
75627 drv_stop(local);
75628 return -EADDRNOTAVAIL;
75629 }
75630 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75631 mutex_unlock(&local->mtx);
75632
75633 if (coming_up)
75634 - local->open_count++;
75635 + local_inc(&local->open_count);
75636
75637 if (hw_reconf_flags) {
75638 ieee80211_hw_config(local, hw_reconf_flags);
75639 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75640 err_del_interface:
75641 drv_remove_interface(local, &sdata->vif);
75642 err_stop:
75643 - if (!local->open_count)
75644 + if (!local_read(&local->open_count))
75645 drv_stop(local);
75646 err_del_bss:
75647 sdata->bss = NULL;
75648 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75649 }
75650
75651 if (going_down)
75652 - local->open_count--;
75653 + local_dec(&local->open_count);
75654
75655 switch (sdata->vif.type) {
75656 case NL80211_IFTYPE_AP_VLAN:
75657 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75658
75659 ieee80211_recalc_ps(local, -1);
75660
75661 - if (local->open_count == 0) {
75662 + if (local_read(&local->open_count) == 0) {
75663 if (local->ops->napi_poll)
75664 napi_disable(&local->napi);
75665 ieee80211_clear_tx_pending(local);
75666 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75667 index 7d9b21d..0687004 100644
75668 --- a/net/mac80211/main.c
75669 +++ b/net/mac80211/main.c
75670 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75671 local->hw.conf.power_level = power;
75672 }
75673
75674 - if (changed && local->open_count) {
75675 + if (changed && local_read(&local->open_count)) {
75676 ret = drv_config(local, changed);
75677 /*
75678 * Goal:
75679 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75680 index 9ee7164..56c5061 100644
75681 --- a/net/mac80211/pm.c
75682 +++ b/net/mac80211/pm.c
75683 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75684 struct ieee80211_sub_if_data *sdata;
75685 struct sta_info *sta;
75686
75687 - if (!local->open_count)
75688 + if (!local_read(&local->open_count))
75689 goto suspend;
75690
75691 ieee80211_scan_cancel(local);
75692 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75693 cancel_work_sync(&local->dynamic_ps_enable_work);
75694 del_timer_sync(&local->dynamic_ps_timer);
75695
75696 - local->wowlan = wowlan && local->open_count;
75697 + local->wowlan = wowlan && local_read(&local->open_count);
75698 if (local->wowlan) {
75699 int err = drv_suspend(local, wowlan);
75700 if (err < 0) {
75701 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75702 }
75703
75704 /* stop hardware - this must stop RX */
75705 - if (local->open_count)
75706 + if (local_read(&local->open_count))
75707 ieee80211_stop_device(local);
75708
75709 suspend:
75710 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75711 index 7d84b87..6a69cd9 100644
75712 --- a/net/mac80211/rate.c
75713 +++ b/net/mac80211/rate.c
75714 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75715
75716 ASSERT_RTNL();
75717
75718 - if (local->open_count)
75719 + if (local_read(&local->open_count))
75720 return -EBUSY;
75721
75722 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75723 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75724 index c97a065..ff61928 100644
75725 --- a/net/mac80211/rc80211_pid_debugfs.c
75726 +++ b/net/mac80211/rc80211_pid_debugfs.c
75727 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75728
75729 spin_unlock_irqrestore(&events->lock, status);
75730
75731 - if (copy_to_user(buf, pb, p))
75732 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75733 return -EFAULT;
75734
75735 return p;
75736 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75737 index d5230ec..c604b21 100644
75738 --- a/net/mac80211/util.c
75739 +++ b/net/mac80211/util.c
75740 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75741 drv_set_coverage_class(local, hw->wiphy->coverage_class);
75742
75743 /* everything else happens only if HW was up & running */
75744 - if (!local->open_count)
75745 + if (!local_read(&local->open_count))
75746 goto wake_up;
75747
75748 /*
75749 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75750 index d5597b7..ab6d39c 100644
75751 --- a/net/netfilter/Kconfig
75752 +++ b/net/netfilter/Kconfig
75753 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
75754
75755 To compile it as a module, choose M here. If unsure, say N.
75756
75757 +config NETFILTER_XT_MATCH_GRADM
75758 + tristate '"gradm" match support'
75759 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75760 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75761 + ---help---
75762 + The gradm match allows to match on grsecurity RBAC being enabled.
75763 + It is useful when iptables rules are applied early on bootup to
75764 + prevent connections to the machine (except from a trusted host)
75765 + while the RBAC system is disabled.
75766 +
75767 config NETFILTER_XT_MATCH_HASHLIMIT
75768 tristate '"hashlimit" match support'
75769 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75770 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75771 index 1a02853..5d8c22e 100644
75772 --- a/net/netfilter/Makefile
75773 +++ b/net/netfilter/Makefile
75774 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
75775 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75776 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75777 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75778 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75779 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75780 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75781 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75782 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75783 index 29fa5ba..8debc79 100644
75784 --- a/net/netfilter/ipvs/ip_vs_conn.c
75785 +++ b/net/netfilter/ipvs/ip_vs_conn.c
75786 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75787 /* Increase the refcnt counter of the dest */
75788 atomic_inc(&dest->refcnt);
75789
75790 - conn_flags = atomic_read(&dest->conn_flags);
75791 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
75792 if (cp->protocol != IPPROTO_UDP)
75793 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75794 /* Bind with the destination and its corresponding transmitter */
75795 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75796 atomic_set(&cp->refcnt, 1);
75797
75798 atomic_set(&cp->n_control, 0);
75799 - atomic_set(&cp->in_pkts, 0);
75800 + atomic_set_unchecked(&cp->in_pkts, 0);
75801
75802 atomic_inc(&ipvs->conn_count);
75803 if (flags & IP_VS_CONN_F_NO_CPORT)
75804 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75805
75806 /* Don't drop the entry if its number of incoming packets is not
75807 located in [0, 8] */
75808 - i = atomic_read(&cp->in_pkts);
75809 + i = atomic_read_unchecked(&cp->in_pkts);
75810 if (i > 8 || i < 0) return 0;
75811
75812 if (!todrop_rate[i]) return 0;
75813 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75814 index 6dc7d7d..e45913a 100644
75815 --- a/net/netfilter/ipvs/ip_vs_core.c
75816 +++ b/net/netfilter/ipvs/ip_vs_core.c
75817 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75818 ret = cp->packet_xmit(skb, cp, pd->pp);
75819 /* do not touch skb anymore */
75820
75821 - atomic_inc(&cp->in_pkts);
75822 + atomic_inc_unchecked(&cp->in_pkts);
75823 ip_vs_conn_put(cp);
75824 return ret;
75825 }
75826 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75827 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75828 pkts = sysctl_sync_threshold(ipvs);
75829 else
75830 - pkts = atomic_add_return(1, &cp->in_pkts);
75831 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75832
75833 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75834 cp->protocol == IPPROTO_SCTP) {
75835 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75836 index e1a66cf..0910076 100644
75837 --- a/net/netfilter/ipvs/ip_vs_ctl.c
75838 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
75839 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75840 ip_vs_rs_hash(ipvs, dest);
75841 write_unlock_bh(&ipvs->rs_lock);
75842 }
75843 - atomic_set(&dest->conn_flags, conn_flags);
75844 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
75845
75846 /* bind the service */
75847 if (!dest->svc) {
75848 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75849 " %-7s %-6d %-10d %-10d\n",
75850 &dest->addr.in6,
75851 ntohs(dest->port),
75852 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75853 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75854 atomic_read(&dest->weight),
75855 atomic_read(&dest->activeconns),
75856 atomic_read(&dest->inactconns));
75857 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75858 "%-7s %-6d %-10d %-10d\n",
75859 ntohl(dest->addr.ip),
75860 ntohs(dest->port),
75861 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75862 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75863 atomic_read(&dest->weight),
75864 atomic_read(&dest->activeconns),
75865 atomic_read(&dest->inactconns));
75866 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
75867
75868 entry.addr = dest->addr.ip;
75869 entry.port = dest->port;
75870 - entry.conn_flags = atomic_read(&dest->conn_flags);
75871 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
75872 entry.weight = atomic_read(&dest->weight);
75873 entry.u_threshold = dest->u_threshold;
75874 entry.l_threshold = dest->l_threshold;
75875 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
75876 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
75877
75878 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
75879 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75880 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
75881 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
75882 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
75883 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
75884 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
75885 index 2b6678c0..aaa41fc 100644
75886 --- a/net/netfilter/ipvs/ip_vs_sync.c
75887 +++ b/net/netfilter/ipvs/ip_vs_sync.c
75888 @@ -649,7 +649,7 @@ control:
75889 * i.e only increment in_pkts for Templates.
75890 */
75891 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
75892 - int pkts = atomic_add_return(1, &cp->in_pkts);
75893 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75894
75895 if (pkts % sysctl_sync_period(ipvs) != 1)
75896 return;
75897 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
75898
75899 if (opt)
75900 memcpy(&cp->in_seq, opt, sizeof(*opt));
75901 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75902 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
75903 cp->state = state;
75904 cp->old_state = cp->state;
75905 /*
75906 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
75907 index aa2d720..d8aa111 100644
75908 --- a/net/netfilter/ipvs/ip_vs_xmit.c
75909 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
75910 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
75911 else
75912 rc = NF_ACCEPT;
75913 /* do not touch skb anymore */
75914 - atomic_inc(&cp->in_pkts);
75915 + atomic_inc_unchecked(&cp->in_pkts);
75916 goto out;
75917 }
75918
75919 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
75920 else
75921 rc = NF_ACCEPT;
75922 /* do not touch skb anymore */
75923 - atomic_inc(&cp->in_pkts);
75924 + atomic_inc_unchecked(&cp->in_pkts);
75925 goto out;
75926 }
75927
75928 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
75929 index 66b2c54..c7884e3 100644
75930 --- a/net/netfilter/nfnetlink_log.c
75931 +++ b/net/netfilter/nfnetlink_log.c
75932 @@ -70,7 +70,7 @@ struct nfulnl_instance {
75933 };
75934
75935 static DEFINE_SPINLOCK(instances_lock);
75936 -static atomic_t global_seq;
75937 +static atomic_unchecked_t global_seq;
75938
75939 #define INSTANCE_BUCKETS 16
75940 static struct hlist_head instance_table[INSTANCE_BUCKETS];
75941 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
75942 /* global sequence number */
75943 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
75944 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
75945 - htonl(atomic_inc_return(&global_seq)));
75946 + htonl(atomic_inc_return_unchecked(&global_seq)));
75947
75948 if (data_len) {
75949 struct nlattr *nla;
75950 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
75951 new file mode 100644
75952 index 0000000..6905327
75953 --- /dev/null
75954 +++ b/net/netfilter/xt_gradm.c
75955 @@ -0,0 +1,51 @@
75956 +/*
75957 + * gradm match for netfilter
75958 + * Copyright © Zbigniew Krzystolik, 2010
75959 + *
75960 + * This program is free software; you can redistribute it and/or modify
75961 + * it under the terms of the GNU General Public License; either version
75962 + * 2 or 3 as published by the Free Software Foundation.
75963 + */
75964 +#include <linux/module.h>
75965 +#include <linux/moduleparam.h>
75966 +#include <linux/skbuff.h>
75967 +#include <linux/netfilter/x_tables.h>
75968 +#include <linux/grsecurity.h>
75969 +#include <linux/netfilter/xt_gradm.h>
75970 +
75971 +static bool
75972 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
75973 +{
75974 + const struct xt_gradm_mtinfo *info = par->matchinfo;
75975 + bool retval = false;
75976 + if (gr_acl_is_enabled())
75977 + retval = true;
75978 + return retval ^ info->invflags;
75979 +}
75980 +
75981 +static struct xt_match gradm_mt_reg __read_mostly = {
75982 + .name = "gradm",
75983 + .revision = 0,
75984 + .family = NFPROTO_UNSPEC,
75985 + .match = gradm_mt,
75986 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
75987 + .me = THIS_MODULE,
75988 +};
75989 +
75990 +static int __init gradm_mt_init(void)
75991 +{
75992 + return xt_register_match(&gradm_mt_reg);
75993 +}
75994 +
75995 +static void __exit gradm_mt_exit(void)
75996 +{
75997 + xt_unregister_match(&gradm_mt_reg);
75998 +}
75999 +
76000 +module_init(gradm_mt_init);
76001 +module_exit(gradm_mt_exit);
76002 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
76003 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
76004 +MODULE_LICENSE("GPL");
76005 +MODULE_ALIAS("ipt_gradm");
76006 +MODULE_ALIAS("ip6t_gradm");
76007 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
76008 index 4fe4fb4..87a89e5 100644
76009 --- a/net/netfilter/xt_statistic.c
76010 +++ b/net/netfilter/xt_statistic.c
76011 @@ -19,7 +19,7 @@
76012 #include <linux/module.h>
76013
76014 struct xt_statistic_priv {
76015 - atomic_t count;
76016 + atomic_unchecked_t count;
76017 } ____cacheline_aligned_in_smp;
76018
76019 MODULE_LICENSE("GPL");
76020 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
76021 break;
76022 case XT_STATISTIC_MODE_NTH:
76023 do {
76024 - oval = atomic_read(&info->master->count);
76025 + oval = atomic_read_unchecked(&info->master->count);
76026 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
76027 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
76028 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
76029 if (nval == 0)
76030 ret = !ret;
76031 break;
76032 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
76033 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
76034 if (info->master == NULL)
76035 return -ENOMEM;
76036 - atomic_set(&info->master->count, info->u.nth.count);
76037 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
76038
76039 return 0;
76040 }
76041 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
76042 index 1201b6d..bcff8c6 100644
76043 --- a/net/netlink/af_netlink.c
76044 +++ b/net/netlink/af_netlink.c
76045 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
76046 sk->sk_error_report(sk);
76047 }
76048 }
76049 - atomic_inc(&sk->sk_drops);
76050 + atomic_inc_unchecked(&sk->sk_drops);
76051 }
76052
76053 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
76054 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
76055 sk_wmem_alloc_get(s),
76056 nlk->cb,
76057 atomic_read(&s->sk_refcnt),
76058 - atomic_read(&s->sk_drops),
76059 + atomic_read_unchecked(&s->sk_drops),
76060 sock_i_ino(s)
76061 );
76062
76063 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
76064 index 732152f..60bb09e 100644
76065 --- a/net/netrom/af_netrom.c
76066 +++ b/net/netrom/af_netrom.c
76067 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76068 struct sock *sk = sock->sk;
76069 struct nr_sock *nr = nr_sk(sk);
76070
76071 + memset(sax, 0, sizeof(*sax));
76072 lock_sock(sk);
76073 if (peer != 0) {
76074 if (sk->sk_state != TCP_ESTABLISHED) {
76075 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76076 *uaddr_len = sizeof(struct full_sockaddr_ax25);
76077 } else {
76078 sax->fsa_ax25.sax25_family = AF_NETROM;
76079 - sax->fsa_ax25.sax25_ndigis = 0;
76080 sax->fsa_ax25.sax25_call = nr->source_addr;
76081 *uaddr_len = sizeof(struct sockaddr_ax25);
76082 }
76083 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
76084 index d9d4970..d5a6a68 100644
76085 --- a/net/packet/af_packet.c
76086 +++ b/net/packet/af_packet.c
76087 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76088
76089 spin_lock(&sk->sk_receive_queue.lock);
76090 po->stats.tp_packets++;
76091 - skb->dropcount = atomic_read(&sk->sk_drops);
76092 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76093 __skb_queue_tail(&sk->sk_receive_queue, skb);
76094 spin_unlock(&sk->sk_receive_queue.lock);
76095 sk->sk_data_ready(sk, skb->len);
76096 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76097 drop_n_acct:
76098 spin_lock(&sk->sk_receive_queue.lock);
76099 po->stats.tp_drops++;
76100 - atomic_inc(&sk->sk_drops);
76101 + atomic_inc_unchecked(&sk->sk_drops);
76102 spin_unlock(&sk->sk_receive_queue.lock);
76103
76104 drop_n_restore:
76105 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76106 case PACKET_HDRLEN:
76107 if (len > sizeof(int))
76108 len = sizeof(int);
76109 - if (copy_from_user(&val, optval, len))
76110 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
76111 return -EFAULT;
76112 switch (val) {
76113 case TPACKET_V1:
76114 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76115
76116 if (put_user(len, optlen))
76117 return -EFAULT;
76118 - if (copy_to_user(optval, data, len))
76119 + if (len > sizeof(st) || copy_to_user(optval, data, len))
76120 return -EFAULT;
76121 return 0;
76122 }
76123 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
76124 index d65f699..05aa6ce 100644
76125 --- a/net/phonet/af_phonet.c
76126 +++ b/net/phonet/af_phonet.c
76127 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
76128 {
76129 struct phonet_protocol *pp;
76130
76131 - if (protocol >= PHONET_NPROTO)
76132 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76133 return NULL;
76134
76135 rcu_read_lock();
76136 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
76137 {
76138 int err = 0;
76139
76140 - if (protocol >= PHONET_NPROTO)
76141 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76142 return -EINVAL;
76143
76144 err = proto_register(pp->prot, 1);
76145 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
76146 index 2ba6e9f..409573f 100644
76147 --- a/net/phonet/pep.c
76148 +++ b/net/phonet/pep.c
76149 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76150
76151 case PNS_PEP_CTRL_REQ:
76152 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
76153 - atomic_inc(&sk->sk_drops);
76154 + atomic_inc_unchecked(&sk->sk_drops);
76155 break;
76156 }
76157 __skb_pull(skb, 4);
76158 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76159 }
76160
76161 if (pn->rx_credits == 0) {
76162 - atomic_inc(&sk->sk_drops);
76163 + atomic_inc_unchecked(&sk->sk_drops);
76164 err = -ENOBUFS;
76165 break;
76166 }
76167 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76168 }
76169
76170 if (pn->rx_credits == 0) {
76171 - atomic_inc(&sk->sk_drops);
76172 + atomic_inc_unchecked(&sk->sk_drops);
76173 err = NET_RX_DROP;
76174 break;
76175 }
76176 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76177 index 4c7eff3..59c727f 100644
76178 --- a/net/phonet/socket.c
76179 +++ b/net/phonet/socket.c
76180 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76181 pn->resource, sk->sk_state,
76182 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76183 sock_i_uid(sk), sock_i_ino(sk),
76184 - atomic_read(&sk->sk_refcnt), sk,
76185 - atomic_read(&sk->sk_drops), &len);
76186 + atomic_read(&sk->sk_refcnt),
76187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76188 + NULL,
76189 +#else
76190 + sk,
76191 +#endif
76192 + atomic_read_unchecked(&sk->sk_drops), &len);
76193 }
76194 seq_printf(seq, "%*s\n", 127 - len, "");
76195 return 0;
76196 diff --git a/net/rds/cong.c b/net/rds/cong.c
76197 index e5b65ac..f3b6fb7 100644
76198 --- a/net/rds/cong.c
76199 +++ b/net/rds/cong.c
76200 @@ -78,7 +78,7 @@
76201 * finds that the saved generation number is smaller than the global generation
76202 * number, it wakes up the process.
76203 */
76204 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76205 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76206
76207 /*
76208 * Congestion monitoring
76209 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76210 rdsdebug("waking map %p for %pI4\n",
76211 map, &map->m_addr);
76212 rds_stats_inc(s_cong_update_received);
76213 - atomic_inc(&rds_cong_generation);
76214 + atomic_inc_unchecked(&rds_cong_generation);
76215 if (waitqueue_active(&map->m_waitq))
76216 wake_up(&map->m_waitq);
76217 if (waitqueue_active(&rds_poll_waitq))
76218 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76219
76220 int rds_cong_updated_since(unsigned long *recent)
76221 {
76222 - unsigned long gen = atomic_read(&rds_cong_generation);
76223 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76224
76225 if (likely(*recent == gen))
76226 return 0;
76227 diff --git a/net/rds/ib.h b/net/rds/ib.h
76228 index edfaaaf..8c89879 100644
76229 --- a/net/rds/ib.h
76230 +++ b/net/rds/ib.h
76231 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76232 /* sending acks */
76233 unsigned long i_ack_flags;
76234 #ifdef KERNEL_HAS_ATOMIC64
76235 - atomic64_t i_ack_next; /* next ACK to send */
76236 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76237 #else
76238 spinlock_t i_ack_lock; /* protect i_ack_next */
76239 u64 i_ack_next; /* next ACK to send */
76240 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
76241 index 51c8689..36c555f 100644
76242 --- a/net/rds/ib_cm.c
76243 +++ b/net/rds/ib_cm.c
76244 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
76245 /* Clear the ACK state */
76246 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76247 #ifdef KERNEL_HAS_ATOMIC64
76248 - atomic64_set(&ic->i_ack_next, 0);
76249 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76250 #else
76251 ic->i_ack_next = 0;
76252 #endif
76253 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
76254 index e29e0ca..fa3a6a3 100644
76255 --- a/net/rds/ib_recv.c
76256 +++ b/net/rds/ib_recv.c
76257 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76258 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
76259 int ack_required)
76260 {
76261 - atomic64_set(&ic->i_ack_next, seq);
76262 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76263 if (ack_required) {
76264 smp_mb__before_clear_bit();
76265 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76266 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76267 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76268 smp_mb__after_clear_bit();
76269
76270 - return atomic64_read(&ic->i_ack_next);
76271 + return atomic64_read_unchecked(&ic->i_ack_next);
76272 }
76273 #endif
76274
76275 diff --git a/net/rds/iw.h b/net/rds/iw.h
76276 index 04ce3b1..48119a6 100644
76277 --- a/net/rds/iw.h
76278 +++ b/net/rds/iw.h
76279 @@ -134,7 +134,7 @@ struct rds_iw_connection {
76280 /* sending acks */
76281 unsigned long i_ack_flags;
76282 #ifdef KERNEL_HAS_ATOMIC64
76283 - atomic64_t i_ack_next; /* next ACK to send */
76284 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76285 #else
76286 spinlock_t i_ack_lock; /* protect i_ack_next */
76287 u64 i_ack_next; /* next ACK to send */
76288 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
76289 index 9556d28..f046d0e 100644
76290 --- a/net/rds/iw_cm.c
76291 +++ b/net/rds/iw_cm.c
76292 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
76293 /* Clear the ACK state */
76294 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76295 #ifdef KERNEL_HAS_ATOMIC64
76296 - atomic64_set(&ic->i_ack_next, 0);
76297 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76298 #else
76299 ic->i_ack_next = 0;
76300 #endif
76301 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
76302 index 5e57347..3916042 100644
76303 --- a/net/rds/iw_recv.c
76304 +++ b/net/rds/iw_recv.c
76305 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76306 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
76307 int ack_required)
76308 {
76309 - atomic64_set(&ic->i_ack_next, seq);
76310 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76311 if (ack_required) {
76312 smp_mb__before_clear_bit();
76313 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76314 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76315 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76316 smp_mb__after_clear_bit();
76317
76318 - return atomic64_read(&ic->i_ack_next);
76319 + return atomic64_read_unchecked(&ic->i_ack_next);
76320 }
76321 #endif
76322
76323 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
76324 index edac9ef..16bcb98 100644
76325 --- a/net/rds/tcp.c
76326 +++ b/net/rds/tcp.c
76327 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
76328 int val = 1;
76329
76330 set_fs(KERNEL_DS);
76331 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
76332 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
76333 sizeof(val));
76334 set_fs(oldfs);
76335 }
76336 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
76337 index 1b4fd68..2234175 100644
76338 --- a/net/rds/tcp_send.c
76339 +++ b/net/rds/tcp_send.c
76340 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
76341
76342 oldfs = get_fs();
76343 set_fs(KERNEL_DS);
76344 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
76345 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
76346 sizeof(val));
76347 set_fs(oldfs);
76348 }
76349 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
76350 index 74c064c..fdec26f 100644
76351 --- a/net/rxrpc/af_rxrpc.c
76352 +++ b/net/rxrpc/af_rxrpc.c
76353 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
76354 __be32 rxrpc_epoch;
76355
76356 /* current debugging ID */
76357 -atomic_t rxrpc_debug_id;
76358 +atomic_unchecked_t rxrpc_debug_id;
76359
76360 /* count of skbs currently in use */
76361 atomic_t rxrpc_n_skbs;
76362 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
76363 index f99cfce..cc529dd 100644
76364 --- a/net/rxrpc/ar-ack.c
76365 +++ b/net/rxrpc/ar-ack.c
76366 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76367
76368 _enter("{%d,%d,%d,%d},",
76369 call->acks_hard, call->acks_unacked,
76370 - atomic_read(&call->sequence),
76371 + atomic_read_unchecked(&call->sequence),
76372 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
76373
76374 stop = 0;
76375 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76376
76377 /* each Tx packet has a new serial number */
76378 sp->hdr.serial =
76379 - htonl(atomic_inc_return(&call->conn->serial));
76380 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
76381
76382 hdr = (struct rxrpc_header *) txb->head;
76383 hdr->serial = sp->hdr.serial;
76384 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
76385 */
76386 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
76387 {
76388 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
76389 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
76390 }
76391
76392 /*
76393 @@ -629,7 +629,7 @@ process_further:
76394
76395 latest = ntohl(sp->hdr.serial);
76396 hard = ntohl(ack.firstPacket);
76397 - tx = atomic_read(&call->sequence);
76398 + tx = atomic_read_unchecked(&call->sequence);
76399
76400 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76401 latest,
76402 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
76403 goto maybe_reschedule;
76404
76405 send_ACK_with_skew:
76406 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
76407 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
76408 ntohl(ack.serial));
76409 send_ACK:
76410 mtu = call->conn->trans->peer->if_mtu;
76411 @@ -1173,7 +1173,7 @@ send_ACK:
76412 ackinfo.rxMTU = htonl(5692);
76413 ackinfo.jumbo_max = htonl(4);
76414
76415 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76416 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76417 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76418 ntohl(hdr.serial),
76419 ntohs(ack.maxSkew),
76420 @@ -1191,7 +1191,7 @@ send_ACK:
76421 send_message:
76422 _debug("send message");
76423
76424 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76425 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76426 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
76427 send_message_2:
76428
76429 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
76430 index bf656c2..48f9d27 100644
76431 --- a/net/rxrpc/ar-call.c
76432 +++ b/net/rxrpc/ar-call.c
76433 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
76434 spin_lock_init(&call->lock);
76435 rwlock_init(&call->state_lock);
76436 atomic_set(&call->usage, 1);
76437 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
76438 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76439 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
76440
76441 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
76442 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
76443 index 4106ca9..a338d7a 100644
76444 --- a/net/rxrpc/ar-connection.c
76445 +++ b/net/rxrpc/ar-connection.c
76446 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
76447 rwlock_init(&conn->lock);
76448 spin_lock_init(&conn->state_lock);
76449 atomic_set(&conn->usage, 1);
76450 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
76451 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76452 conn->avail_calls = RXRPC_MAXCALLS;
76453 conn->size_align = 4;
76454 conn->header_size = sizeof(struct rxrpc_header);
76455 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
76456 index e7ed43a..6afa140 100644
76457 --- a/net/rxrpc/ar-connevent.c
76458 +++ b/net/rxrpc/ar-connevent.c
76459 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
76460
76461 len = iov[0].iov_len + iov[1].iov_len;
76462
76463 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76464 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76465 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
76466
76467 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76468 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
76469 index 1a2b0633..e8d1382 100644
76470 --- a/net/rxrpc/ar-input.c
76471 +++ b/net/rxrpc/ar-input.c
76472 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
76473 /* track the latest serial number on this connection for ACK packet
76474 * information */
76475 serial = ntohl(sp->hdr.serial);
76476 - hi_serial = atomic_read(&call->conn->hi_serial);
76477 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
76478 while (serial > hi_serial)
76479 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
76480 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
76481 serial);
76482
76483 /* request ACK generation for any ACK or DATA packet that requests
76484 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
76485 index 8e22bd3..f66d1c0 100644
76486 --- a/net/rxrpc/ar-internal.h
76487 +++ b/net/rxrpc/ar-internal.h
76488 @@ -272,8 +272,8 @@ struct rxrpc_connection {
76489 int error; /* error code for local abort */
76490 int debug_id; /* debug ID for printks */
76491 unsigned call_counter; /* call ID counter */
76492 - atomic_t serial; /* packet serial number counter */
76493 - atomic_t hi_serial; /* highest serial number received */
76494 + atomic_unchecked_t serial; /* packet serial number counter */
76495 + atomic_unchecked_t hi_serial; /* highest serial number received */
76496 u8 avail_calls; /* number of calls available */
76497 u8 size_align; /* data size alignment (for security) */
76498 u8 header_size; /* rxrpc + security header size */
76499 @@ -346,7 +346,7 @@ struct rxrpc_call {
76500 spinlock_t lock;
76501 rwlock_t state_lock; /* lock for state transition */
76502 atomic_t usage;
76503 - atomic_t sequence; /* Tx data packet sequence counter */
76504 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
76505 u32 abort_code; /* local/remote abort code */
76506 enum { /* current state of call */
76507 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
76508 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
76509 */
76510 extern atomic_t rxrpc_n_skbs;
76511 extern __be32 rxrpc_epoch;
76512 -extern atomic_t rxrpc_debug_id;
76513 +extern atomic_unchecked_t rxrpc_debug_id;
76514 extern struct workqueue_struct *rxrpc_workqueue;
76515
76516 /*
76517 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
76518 index 87f7135..74d3703 100644
76519 --- a/net/rxrpc/ar-local.c
76520 +++ b/net/rxrpc/ar-local.c
76521 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
76522 spin_lock_init(&local->lock);
76523 rwlock_init(&local->services_lock);
76524 atomic_set(&local->usage, 1);
76525 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
76526 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76527 memcpy(&local->srx, srx, sizeof(*srx));
76528 }
76529
76530 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
76531 index 338d793..47391d0 100644
76532 --- a/net/rxrpc/ar-output.c
76533 +++ b/net/rxrpc/ar-output.c
76534 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
76535 sp->hdr.cid = call->cid;
76536 sp->hdr.callNumber = call->call_id;
76537 sp->hdr.seq =
76538 - htonl(atomic_inc_return(&call->sequence));
76539 + htonl(atomic_inc_return_unchecked(&call->sequence));
76540 sp->hdr.serial =
76541 - htonl(atomic_inc_return(&conn->serial));
76542 + htonl(atomic_inc_return_unchecked(&conn->serial));
76543 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
76544 sp->hdr.userStatus = 0;
76545 sp->hdr.securityIndex = conn->security_ix;
76546 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
76547 index 2754f09..b20e38f 100644
76548 --- a/net/rxrpc/ar-peer.c
76549 +++ b/net/rxrpc/ar-peer.c
76550 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
76551 INIT_LIST_HEAD(&peer->error_targets);
76552 spin_lock_init(&peer->lock);
76553 atomic_set(&peer->usage, 1);
76554 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
76555 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76556 memcpy(&peer->srx, srx, sizeof(*srx));
76557
76558 rxrpc_assess_MTU_size(peer);
76559 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
76560 index 38047f7..9f48511 100644
76561 --- a/net/rxrpc/ar-proc.c
76562 +++ b/net/rxrpc/ar-proc.c
76563 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
76564 atomic_read(&conn->usage),
76565 rxrpc_conn_states[conn->state],
76566 key_serial(conn->key),
76567 - atomic_read(&conn->serial),
76568 - atomic_read(&conn->hi_serial));
76569 + atomic_read_unchecked(&conn->serial),
76570 + atomic_read_unchecked(&conn->hi_serial));
76571
76572 return 0;
76573 }
76574 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
76575 index 92df566..87ec1bf 100644
76576 --- a/net/rxrpc/ar-transport.c
76577 +++ b/net/rxrpc/ar-transport.c
76578 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
76579 spin_lock_init(&trans->client_lock);
76580 rwlock_init(&trans->conn_lock);
76581 atomic_set(&trans->usage, 1);
76582 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
76583 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76584
76585 if (peer->srx.transport.family == AF_INET) {
76586 switch (peer->srx.transport_type) {
76587 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
76588 index 7635107..4670276 100644
76589 --- a/net/rxrpc/rxkad.c
76590 +++ b/net/rxrpc/rxkad.c
76591 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
76592
76593 len = iov[0].iov_len + iov[1].iov_len;
76594
76595 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76596 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76597 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
76598
76599 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76600 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
76601
76602 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
76603
76604 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
76605 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76606 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
76607
76608 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
76609 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
76610 index 1e2eee8..ce3967e 100644
76611 --- a/net/sctp/proc.c
76612 +++ b/net/sctp/proc.c
76613 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
76614 seq_printf(seq,
76615 "%8pK %8pK %-3d %-3d %-2d %-4d "
76616 "%4d %8d %8d %7d %5lu %-5d %5d ",
76617 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
76618 + assoc, sk,
76619 + sctp_sk(sk)->type, sk->sk_state,
76620 assoc->state, hash,
76621 assoc->assoc_id,
76622 assoc->sndbuf_used,
76623 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
76624 index 54a7cd2..944edae 100644
76625 --- a/net/sctp/socket.c
76626 +++ b/net/sctp/socket.c
76627 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
76628 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
76629 if (space_left < addrlen)
76630 return -ENOMEM;
76631 - if (copy_to_user(to, &temp, addrlen))
76632 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
76633 return -EFAULT;
76634 to += addrlen;
76635 cnt++;
76636 diff --git a/net/socket.c b/net/socket.c
76637 index 2dce67a..1e91168 100644
76638 --- a/net/socket.c
76639 +++ b/net/socket.c
76640 @@ -88,6 +88,7 @@
76641 #include <linux/nsproxy.h>
76642 #include <linux/magic.h>
76643 #include <linux/slab.h>
76644 +#include <linux/in.h>
76645
76646 #include <asm/uaccess.h>
76647 #include <asm/unistd.h>
76648 @@ -105,6 +106,8 @@
76649 #include <linux/sockios.h>
76650 #include <linux/atalk.h>
76651
76652 +#include <linux/grsock.h>
76653 +
76654 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
76655 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
76656 unsigned long nr_segs, loff_t pos);
76657 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
76658 &sockfs_dentry_operations, SOCKFS_MAGIC);
76659 }
76660
76661 -static struct vfsmount *sock_mnt __read_mostly;
76662 +struct vfsmount *sock_mnt __read_mostly;
76663
76664 static struct file_system_type sock_fs_type = {
76665 .name = "sockfs",
76666 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
76667 return -EAFNOSUPPORT;
76668 if (type < 0 || type >= SOCK_MAX)
76669 return -EINVAL;
76670 + if (protocol < 0)
76671 + return -EINVAL;
76672
76673 /* Compatibility.
76674
76675 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
76676 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
76677 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
76678
76679 + if(!gr_search_socket(family, type, protocol)) {
76680 + retval = -EACCES;
76681 + goto out;
76682 + }
76683 +
76684 + if (gr_handle_sock_all(family, type, protocol)) {
76685 + retval = -EACCES;
76686 + goto out;
76687 + }
76688 +
76689 retval = sock_create(family, type, protocol, &sock);
76690 if (retval < 0)
76691 goto out;
76692 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76693 if (sock) {
76694 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
76695 if (err >= 0) {
76696 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
76697 + err = -EACCES;
76698 + goto error;
76699 + }
76700 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
76701 + if (err)
76702 + goto error;
76703 +
76704 err = security_socket_bind(sock,
76705 (struct sockaddr *)&address,
76706 addrlen);
76707 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76708 (struct sockaddr *)
76709 &address, addrlen);
76710 }
76711 +error:
76712 fput_light(sock->file, fput_needed);
76713 }
76714 return err;
76715 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
76716 if ((unsigned)backlog > somaxconn)
76717 backlog = somaxconn;
76718
76719 + if (gr_handle_sock_server_other(sock->sk)) {
76720 + err = -EPERM;
76721 + goto error;
76722 + }
76723 +
76724 + err = gr_search_listen(sock);
76725 + if (err)
76726 + goto error;
76727 +
76728 err = security_socket_listen(sock, backlog);
76729 if (!err)
76730 err = sock->ops->listen(sock, backlog);
76731
76732 +error:
76733 fput_light(sock->file, fput_needed);
76734 }
76735 return err;
76736 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76737 newsock->type = sock->type;
76738 newsock->ops = sock->ops;
76739
76740 + if (gr_handle_sock_server_other(sock->sk)) {
76741 + err = -EPERM;
76742 + sock_release(newsock);
76743 + goto out_put;
76744 + }
76745 +
76746 + err = gr_search_accept(sock);
76747 + if (err) {
76748 + sock_release(newsock);
76749 + goto out_put;
76750 + }
76751 +
76752 /*
76753 * We don't need try_module_get here, as the listening socket (sock)
76754 * has the protocol module (sock->ops->owner) held.
76755 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76756 fd_install(newfd, newfile);
76757 err = newfd;
76758
76759 + gr_attach_curr_ip(newsock->sk);
76760 +
76761 out_put:
76762 fput_light(sock->file, fput_needed);
76763 out:
76764 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76765 int, addrlen)
76766 {
76767 struct socket *sock;
76768 + struct sockaddr *sck;
76769 struct sockaddr_storage address;
76770 int err, fput_needed;
76771
76772 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76773 if (err < 0)
76774 goto out_put;
76775
76776 + sck = (struct sockaddr *)&address;
76777 +
76778 + if (gr_handle_sock_client(sck)) {
76779 + err = -EACCES;
76780 + goto out_put;
76781 + }
76782 +
76783 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
76784 + if (err)
76785 + goto out_put;
76786 +
76787 err =
76788 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
76789 if (err)
76790 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
76791 * checking falls down on this.
76792 */
76793 if (copy_from_user(ctl_buf,
76794 - (void __user __force *)msg_sys->msg_control,
76795 + (void __force_user *)msg_sys->msg_control,
76796 ctl_len))
76797 goto out_freectl;
76798 msg_sys->msg_control = ctl_buf;
76799 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
76800 * kernel msghdr to use the kernel address space)
76801 */
76802
76803 - uaddr = (__force void __user *)msg_sys->msg_name;
76804 + uaddr = (void __force_user *)msg_sys->msg_name;
76805 uaddr_len = COMPAT_NAMELEN(msg);
76806 if (MSG_CMSG_COMPAT & flags) {
76807 err = verify_compat_iovec(msg_sys, iov,
76808 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76809 }
76810
76811 ifr = compat_alloc_user_space(buf_size);
76812 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
76813 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
76814
76815 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
76816 return -EFAULT;
76817 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76818 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
76819
76820 if (copy_in_user(rxnfc, compat_rxnfc,
76821 - (void *)(&rxnfc->fs.m_ext + 1) -
76822 - (void *)rxnfc) ||
76823 + (void __user *)(&rxnfc->fs.m_ext + 1) -
76824 + (void __user *)rxnfc) ||
76825 copy_in_user(&rxnfc->fs.ring_cookie,
76826 &compat_rxnfc->fs.ring_cookie,
76827 - (void *)(&rxnfc->fs.location + 1) -
76828 - (void *)&rxnfc->fs.ring_cookie) ||
76829 + (void __user *)(&rxnfc->fs.location + 1) -
76830 + (void __user *)&rxnfc->fs.ring_cookie) ||
76831 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
76832 sizeof(rxnfc->rule_cnt)))
76833 return -EFAULT;
76834 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76835
76836 if (convert_out) {
76837 if (copy_in_user(compat_rxnfc, rxnfc,
76838 - (const void *)(&rxnfc->fs.m_ext + 1) -
76839 - (const void *)rxnfc) ||
76840 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
76841 + (const void __user *)rxnfc) ||
76842 copy_in_user(&compat_rxnfc->fs.ring_cookie,
76843 &rxnfc->fs.ring_cookie,
76844 - (const void *)(&rxnfc->fs.location + 1) -
76845 - (const void *)&rxnfc->fs.ring_cookie) ||
76846 + (const void __user *)(&rxnfc->fs.location + 1) -
76847 + (const void __user *)&rxnfc->fs.ring_cookie) ||
76848 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
76849 sizeof(rxnfc->rule_cnt)))
76850 return -EFAULT;
76851 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
76852 old_fs = get_fs();
76853 set_fs(KERNEL_DS);
76854 err = dev_ioctl(net, cmd,
76855 - (struct ifreq __user __force *) &kifr);
76856 + (struct ifreq __force_user *) &kifr);
76857 set_fs(old_fs);
76858
76859 return err;
76860 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
76861
76862 old_fs = get_fs();
76863 set_fs(KERNEL_DS);
76864 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
76865 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
76866 set_fs(old_fs);
76867
76868 if (cmd == SIOCGIFMAP && !err) {
76869 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
76870 ret |= __get_user(rtdev, &(ur4->rt_dev));
76871 if (rtdev) {
76872 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
76873 - r4.rt_dev = (char __user __force *)devname;
76874 + r4.rt_dev = (char __force_user *)devname;
76875 devname[15] = 0;
76876 } else
76877 r4.rt_dev = NULL;
76878 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
76879 int __user *uoptlen;
76880 int err;
76881
76882 - uoptval = (char __user __force *) optval;
76883 - uoptlen = (int __user __force *) optlen;
76884 + uoptval = (char __force_user *) optval;
76885 + uoptlen = (int __force_user *) optlen;
76886
76887 set_fs(KERNEL_DS);
76888 if (level == SOL_SOCKET)
76889 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
76890 char __user *uoptval;
76891 int err;
76892
76893 - uoptval = (char __user __force *) optval;
76894 + uoptval = (char __force_user *) optval;
76895
76896 set_fs(KERNEL_DS);
76897 if (level == SOL_SOCKET)
76898 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
76899 index 00a1a2a..6a0138a 100644
76900 --- a/net/sunrpc/sched.c
76901 +++ b/net/sunrpc/sched.c
76902 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
76903 #ifdef RPC_DEBUG
76904 static void rpc_task_set_debuginfo(struct rpc_task *task)
76905 {
76906 - static atomic_t rpc_pid;
76907 + static atomic_unchecked_t rpc_pid;
76908
76909 - task->tk_pid = atomic_inc_return(&rpc_pid);
76910 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
76911 }
76912 #else
76913 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
76914 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
76915 index 71bed1c..5dff36d 100644
76916 --- a/net/sunrpc/svcsock.c
76917 +++ b/net/sunrpc/svcsock.c
76918 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
76919 int buflen, unsigned int base)
76920 {
76921 size_t save_iovlen;
76922 - void __user *save_iovbase;
76923 + void *save_iovbase;
76924 unsigned int i;
76925 int ret;
76926
76927 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
76928 index 09af4fa..77110a9 100644
76929 --- a/net/sunrpc/xprtrdma/svc_rdma.c
76930 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
76931 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
76932 static unsigned int min_max_inline = 4096;
76933 static unsigned int max_max_inline = 65536;
76934
76935 -atomic_t rdma_stat_recv;
76936 -atomic_t rdma_stat_read;
76937 -atomic_t rdma_stat_write;
76938 -atomic_t rdma_stat_sq_starve;
76939 -atomic_t rdma_stat_rq_starve;
76940 -atomic_t rdma_stat_rq_poll;
76941 -atomic_t rdma_stat_rq_prod;
76942 -atomic_t rdma_stat_sq_poll;
76943 -atomic_t rdma_stat_sq_prod;
76944 +atomic_unchecked_t rdma_stat_recv;
76945 +atomic_unchecked_t rdma_stat_read;
76946 +atomic_unchecked_t rdma_stat_write;
76947 +atomic_unchecked_t rdma_stat_sq_starve;
76948 +atomic_unchecked_t rdma_stat_rq_starve;
76949 +atomic_unchecked_t rdma_stat_rq_poll;
76950 +atomic_unchecked_t rdma_stat_rq_prod;
76951 +atomic_unchecked_t rdma_stat_sq_poll;
76952 +atomic_unchecked_t rdma_stat_sq_prod;
76953
76954 /* Temporary NFS request map and context caches */
76955 struct kmem_cache *svc_rdma_map_cachep;
76956 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
76957 len -= *ppos;
76958 if (len > *lenp)
76959 len = *lenp;
76960 - if (len && copy_to_user(buffer, str_buf, len))
76961 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
76962 return -EFAULT;
76963 *lenp = len;
76964 *ppos += len;
76965 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
76966 {
76967 .procname = "rdma_stat_read",
76968 .data = &rdma_stat_read,
76969 - .maxlen = sizeof(atomic_t),
76970 + .maxlen = sizeof(atomic_unchecked_t),
76971 .mode = 0644,
76972 .proc_handler = read_reset_stat,
76973 },
76974 {
76975 .procname = "rdma_stat_recv",
76976 .data = &rdma_stat_recv,
76977 - .maxlen = sizeof(atomic_t),
76978 + .maxlen = sizeof(atomic_unchecked_t),
76979 .mode = 0644,
76980 .proc_handler = read_reset_stat,
76981 },
76982 {
76983 .procname = "rdma_stat_write",
76984 .data = &rdma_stat_write,
76985 - .maxlen = sizeof(atomic_t),
76986 + .maxlen = sizeof(atomic_unchecked_t),
76987 .mode = 0644,
76988 .proc_handler = read_reset_stat,
76989 },
76990 {
76991 .procname = "rdma_stat_sq_starve",
76992 .data = &rdma_stat_sq_starve,
76993 - .maxlen = sizeof(atomic_t),
76994 + .maxlen = sizeof(atomic_unchecked_t),
76995 .mode = 0644,
76996 .proc_handler = read_reset_stat,
76997 },
76998 {
76999 .procname = "rdma_stat_rq_starve",
77000 .data = &rdma_stat_rq_starve,
77001 - .maxlen = sizeof(atomic_t),
77002 + .maxlen = sizeof(atomic_unchecked_t),
77003 .mode = 0644,
77004 .proc_handler = read_reset_stat,
77005 },
77006 {
77007 .procname = "rdma_stat_rq_poll",
77008 .data = &rdma_stat_rq_poll,
77009 - .maxlen = sizeof(atomic_t),
77010 + .maxlen = sizeof(atomic_unchecked_t),
77011 .mode = 0644,
77012 .proc_handler = read_reset_stat,
77013 },
77014 {
77015 .procname = "rdma_stat_rq_prod",
77016 .data = &rdma_stat_rq_prod,
77017 - .maxlen = sizeof(atomic_t),
77018 + .maxlen = sizeof(atomic_unchecked_t),
77019 .mode = 0644,
77020 .proc_handler = read_reset_stat,
77021 },
77022 {
77023 .procname = "rdma_stat_sq_poll",
77024 .data = &rdma_stat_sq_poll,
77025 - .maxlen = sizeof(atomic_t),
77026 + .maxlen = sizeof(atomic_unchecked_t),
77027 .mode = 0644,
77028 .proc_handler = read_reset_stat,
77029 },
77030 {
77031 .procname = "rdma_stat_sq_prod",
77032 .data = &rdma_stat_sq_prod,
77033 - .maxlen = sizeof(atomic_t),
77034 + .maxlen = sizeof(atomic_unchecked_t),
77035 .mode = 0644,
77036 .proc_handler = read_reset_stat,
77037 },
77038 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77039 index df67211..c354b13 100644
77040 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77041 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77042 @@ -499,7 +499,7 @@ next_sge:
77043 svc_rdma_put_context(ctxt, 0);
77044 goto out;
77045 }
77046 - atomic_inc(&rdma_stat_read);
77047 + atomic_inc_unchecked(&rdma_stat_read);
77048
77049 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
77050 chl_map->ch[ch_no].count -= read_wr.num_sge;
77051 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77052 dto_q);
77053 list_del_init(&ctxt->dto_q);
77054 } else {
77055 - atomic_inc(&rdma_stat_rq_starve);
77056 + atomic_inc_unchecked(&rdma_stat_rq_starve);
77057 clear_bit(XPT_DATA, &xprt->xpt_flags);
77058 ctxt = NULL;
77059 }
77060 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77061 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
77062 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
77063 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
77064 - atomic_inc(&rdma_stat_recv);
77065 + atomic_inc_unchecked(&rdma_stat_recv);
77066
77067 /* Build up the XDR from the receive buffers. */
77068 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
77069 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77070 index 249a835..fb2794b 100644
77071 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77072 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77073 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
77074 write_wr.wr.rdma.remote_addr = to;
77075
77076 /* Post It */
77077 - atomic_inc(&rdma_stat_write);
77078 + atomic_inc_unchecked(&rdma_stat_write);
77079 if (svc_rdma_send(xprt, &write_wr))
77080 goto err;
77081 return 0;
77082 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77083 index ba1296d..0fec1a5 100644
77084 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
77085 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77086 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77087 return;
77088
77089 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
77090 - atomic_inc(&rdma_stat_rq_poll);
77091 + atomic_inc_unchecked(&rdma_stat_rq_poll);
77092
77093 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
77094 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
77095 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77096 }
77097
77098 if (ctxt)
77099 - atomic_inc(&rdma_stat_rq_prod);
77100 + atomic_inc_unchecked(&rdma_stat_rq_prod);
77101
77102 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
77103 /*
77104 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77105 return;
77106
77107 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
77108 - atomic_inc(&rdma_stat_sq_poll);
77109 + atomic_inc_unchecked(&rdma_stat_sq_poll);
77110 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
77111 if (wc.status != IB_WC_SUCCESS)
77112 /* Close the transport */
77113 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77114 }
77115
77116 if (ctxt)
77117 - atomic_inc(&rdma_stat_sq_prod);
77118 + atomic_inc_unchecked(&rdma_stat_sq_prod);
77119 }
77120
77121 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
77122 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
77123 spin_lock_bh(&xprt->sc_lock);
77124 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
77125 spin_unlock_bh(&xprt->sc_lock);
77126 - atomic_inc(&rdma_stat_sq_starve);
77127 + atomic_inc_unchecked(&rdma_stat_sq_starve);
77128
77129 /* See if we can opportunistically reap SQ WR to make room */
77130 sq_cq_reap(xprt);
77131 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
77132 index e758139..d29ea47 100644
77133 --- a/net/sysctl_net.c
77134 +++ b/net/sysctl_net.c
77135 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
77136 struct ctl_table *table)
77137 {
77138 /* Allow network administrator to have same access as root. */
77139 - if (capable(CAP_NET_ADMIN)) {
77140 + if (capable_nolog(CAP_NET_ADMIN)) {
77141 int mode = (table->mode >> 6) & 7;
77142 return (mode << 6) | (mode << 3) | mode;
77143 }
77144 diff --git a/net/tipc/link.c b/net/tipc/link.c
77145 index ae98a72..7bb6056 100644
77146 --- a/net/tipc/link.c
77147 +++ b/net/tipc/link.c
77148 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
77149 struct tipc_msg fragm_hdr;
77150 struct sk_buff *buf, *buf_chain, *prev;
77151 u32 fragm_crs, fragm_rest, hsz, sect_rest;
77152 - const unchar *sect_crs;
77153 + const unchar __user *sect_crs;
77154 int curr_sect;
77155 u32 fragm_no;
77156
77157 @@ -1247,7 +1247,7 @@ again:
77158
77159 if (!sect_rest) {
77160 sect_rest = msg_sect[++curr_sect].iov_len;
77161 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
77162 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
77163 }
77164
77165 if (sect_rest < fragm_rest)
77166 @@ -1266,7 +1266,7 @@ error:
77167 }
77168 } else
77169 skb_copy_to_linear_data_offset(buf, fragm_crs,
77170 - sect_crs, sz);
77171 + (const void __force_kernel *)sect_crs, sz);
77172 sect_crs += sz;
77173 sect_rest -= sz;
77174 fragm_crs += sz;
77175 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
77176 index 83d5096..dcba497 100644
77177 --- a/net/tipc/msg.c
77178 +++ b/net/tipc/msg.c
77179 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77180 msg_sect[cnt].iov_len);
77181 else
77182 skb_copy_to_linear_data_offset(*buf, pos,
77183 - msg_sect[cnt].iov_base,
77184 + (const void __force_kernel *)msg_sect[cnt].iov_base,
77185 msg_sect[cnt].iov_len);
77186 pos += msg_sect[cnt].iov_len;
77187 }
77188 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
77189 index 1983717..4d6102c 100644
77190 --- a/net/tipc/subscr.c
77191 +++ b/net/tipc/subscr.c
77192 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
77193 {
77194 struct iovec msg_sect;
77195
77196 - msg_sect.iov_base = (void *)&sub->evt;
77197 + msg_sect.iov_base = (void __force_user *)&sub->evt;
77198 msg_sect.iov_len = sizeof(struct tipc_event);
77199
77200 sub->evt.event = htohl(event, sub->swap);
77201 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
77202 index d99678a..3514a21 100644
77203 --- a/net/unix/af_unix.c
77204 +++ b/net/unix/af_unix.c
77205 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
77206 err = -ECONNREFUSED;
77207 if (!S_ISSOCK(inode->i_mode))
77208 goto put_fail;
77209 +
77210 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
77211 + err = -EACCES;
77212 + goto put_fail;
77213 + }
77214 +
77215 u = unix_find_socket_byinode(inode);
77216 if (!u)
77217 goto put_fail;
77218 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
77219 if (u) {
77220 struct dentry *dentry;
77221 dentry = unix_sk(u)->dentry;
77222 +
77223 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
77224 + err = -EPERM;
77225 + sock_put(u);
77226 + goto fail;
77227 + }
77228 +
77229 if (dentry)
77230 touch_atime(unix_sk(u)->mnt, dentry);
77231 } else
77232 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
77233 err = security_path_mknod(&path, dentry, mode, 0);
77234 if (err)
77235 goto out_mknod_drop_write;
77236 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
77237 + err = -EACCES;
77238 + goto out_mknod_drop_write;
77239 + }
77240 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
77241 out_mknod_drop_write:
77242 mnt_drop_write(path.mnt);
77243 if (err)
77244 goto out_mknod_dput;
77245 +
77246 + gr_handle_create(dentry, path.mnt);
77247 +
77248 mutex_unlock(&path.dentry->d_inode->i_mutex);
77249 dput(path.dentry);
77250 path.dentry = dentry;
77251 diff --git a/net/wireless/core.h b/net/wireless/core.h
77252 index b9ec306..b4a563e 100644
77253 --- a/net/wireless/core.h
77254 +++ b/net/wireless/core.h
77255 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
77256 struct mutex mtx;
77257
77258 /* rfkill support */
77259 - struct rfkill_ops rfkill_ops;
77260 + rfkill_ops_no_const rfkill_ops;
77261 struct rfkill *rfkill;
77262 struct work_struct rfkill_sync;
77263
77264 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
77265 index 0af7f54..c916d2f 100644
77266 --- a/net/wireless/wext-core.c
77267 +++ b/net/wireless/wext-core.c
77268 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77269 */
77270
77271 /* Support for very large requests */
77272 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
77273 - (user_length > descr->max_tokens)) {
77274 + if (user_length > descr->max_tokens) {
77275 /* Allow userspace to GET more than max so
77276 * we can support any size GET requests.
77277 * There is still a limit : -ENOMEM.
77278 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77279 }
77280 }
77281
77282 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
77283 - /*
77284 - * If this is a GET, but not NOMAX, it means that the extra
77285 - * data is not bounded by userspace, but by max_tokens. Thus
77286 - * set the length to max_tokens. This matches the extra data
77287 - * allocation.
77288 - * The driver should fill it with the number of tokens it
77289 - * provided, and it may check iwp->length rather than having
77290 - * knowledge of max_tokens. If the driver doesn't change the
77291 - * iwp->length, this ioctl just copies back max_token tokens
77292 - * filled with zeroes. Hopefully the driver isn't claiming
77293 - * them to be valid data.
77294 - */
77295 - iwp->length = descr->max_tokens;
77296 - }
77297 -
77298 err = handler(dev, info, (union iwreq_data *) iwp, extra);
77299
77300 iwp->length += essid_compat;
77301 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
77302 index 9049a5c..cfa6f5c 100644
77303 --- a/net/xfrm/xfrm_policy.c
77304 +++ b/net/xfrm/xfrm_policy.c
77305 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
77306 {
77307 policy->walk.dead = 1;
77308
77309 - atomic_inc(&policy->genid);
77310 + atomic_inc_unchecked(&policy->genid);
77311
77312 if (del_timer(&policy->timer))
77313 xfrm_pol_put(policy);
77314 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
77315 hlist_add_head(&policy->bydst, chain);
77316 xfrm_pol_hold(policy);
77317 net->xfrm.policy_count[dir]++;
77318 - atomic_inc(&flow_cache_genid);
77319 + atomic_inc_unchecked(&flow_cache_genid);
77320 if (delpol)
77321 __xfrm_policy_unlink(delpol, dir);
77322 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
77323 @@ -1530,7 +1530,7 @@ free_dst:
77324 goto out;
77325 }
77326
77327 -static int inline
77328 +static inline int
77329 xfrm_dst_alloc_copy(void **target, const void *src, int size)
77330 {
77331 if (!*target) {
77332 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
77333 return 0;
77334 }
77335
77336 -static int inline
77337 +static inline int
77338 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77339 {
77340 #ifdef CONFIG_XFRM_SUB_POLICY
77341 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77342 #endif
77343 }
77344
77345 -static int inline
77346 +static inline int
77347 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
77348 {
77349 #ifdef CONFIG_XFRM_SUB_POLICY
77350 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
77351
77352 xdst->num_pols = num_pols;
77353 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
77354 - xdst->policy_genid = atomic_read(&pols[0]->genid);
77355 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
77356
77357 return xdst;
77358 }
77359 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
77360 if (xdst->xfrm_genid != dst->xfrm->genid)
77361 return 0;
77362 if (xdst->num_pols > 0 &&
77363 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
77364 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
77365 return 0;
77366
77367 mtu = dst_mtu(dst->child);
77368 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
77369 sizeof(pol->xfrm_vec[i].saddr));
77370 pol->xfrm_vec[i].encap_family = mp->new_family;
77371 /* flush bundles */
77372 - atomic_inc(&pol->genid);
77373 + atomic_inc_unchecked(&pol->genid);
77374 }
77375 }
77376
77377 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
77378 index d2b366c..51ff91ebc 100644
77379 --- a/scripts/Makefile.build
77380 +++ b/scripts/Makefile.build
77381 @@ -109,7 +109,7 @@ endif
77382 endif
77383
77384 # Do not include host rules unless needed
77385 -ifneq ($(hostprogs-y)$(hostprogs-m),)
77386 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
77387 include scripts/Makefile.host
77388 endif
77389
77390 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
77391 index 686cb0d..9d653bf 100644
77392 --- a/scripts/Makefile.clean
77393 +++ b/scripts/Makefile.clean
77394 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
77395 __clean-files := $(extra-y) $(always) \
77396 $(targets) $(clean-files) \
77397 $(host-progs) \
77398 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
77399 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
77400 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
77401
77402 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
77403
77404 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
77405 index 1ac414f..a1c1451 100644
77406 --- a/scripts/Makefile.host
77407 +++ b/scripts/Makefile.host
77408 @@ -31,6 +31,7 @@
77409 # Note: Shared libraries consisting of C++ files are not supported
77410
77411 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
77412 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
77413
77414 # C code
77415 # Executables compiled from a single .c file
77416 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
77417 # Shared libaries (only .c supported)
77418 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
77419 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
77420 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
77421 # Remove .so files from "xxx-objs"
77422 host-cobjs := $(filter-out %.so,$(host-cobjs))
77423
77424 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
77425 index cb1f50c..cef2a7c 100644
77426 --- a/scripts/basic/fixdep.c
77427 +++ b/scripts/basic/fixdep.c
77428 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
77429 /*
77430 * Lookup a value in the configuration string.
77431 */
77432 -static int is_defined_config(const char *name, int len, unsigned int hash)
77433 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
77434 {
77435 struct item *aux;
77436
77437 @@ -211,10 +211,10 @@ static void clear_config(void)
77438 /*
77439 * Record the use of a CONFIG_* word.
77440 */
77441 -static void use_config(const char *m, int slen)
77442 +static void use_config(const char *m, unsigned int slen)
77443 {
77444 unsigned int hash = strhash(m, slen);
77445 - int c, i;
77446 + unsigned int c, i;
77447
77448 if (is_defined_config(m, slen, hash))
77449 return;
77450 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
77451
77452 static void parse_config_file(const char *map, size_t len)
77453 {
77454 - const int *end = (const int *) (map + len);
77455 + const unsigned int *end = (const unsigned int *) (map + len);
77456 /* start at +1, so that p can never be < map */
77457 - const int *m = (const int *) map + 1;
77458 + const unsigned int *m = (const unsigned int *) map + 1;
77459 const char *p, *q;
77460
77461 for (; m < end; m++) {
77462 @@ -406,7 +406,7 @@ static void print_deps(void)
77463 static void traps(void)
77464 {
77465 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
77466 - int *p = (int *)test;
77467 + unsigned int *p = (unsigned int *)test;
77468
77469 if (*p != INT_CONF) {
77470 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
77471 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
77472 new file mode 100644
77473 index 0000000..8729101
77474 --- /dev/null
77475 +++ b/scripts/gcc-plugin.sh
77476 @@ -0,0 +1,2 @@
77477 +#!/bin/sh
77478 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
77479 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
77480 index f936d1f..a66d95f 100644
77481 --- a/scripts/mod/file2alias.c
77482 +++ b/scripts/mod/file2alias.c
77483 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
77484 unsigned long size, unsigned long id_size,
77485 void *symval)
77486 {
77487 - int i;
77488 + unsigned int i;
77489
77490 if (size % id_size || size < id_size) {
77491 if (cross_build != 0)
77492 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
77493 /* USB is special because the bcdDevice can be matched against a numeric range */
77494 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
77495 static void do_usb_entry(struct usb_device_id *id,
77496 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
77497 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
77498 unsigned char range_lo, unsigned char range_hi,
77499 unsigned char max, struct module *mod)
77500 {
77501 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
77502 {
77503 unsigned int devlo, devhi;
77504 unsigned char chi, clo, max;
77505 - int ndigits;
77506 + unsigned int ndigits;
77507
77508 id->match_flags = TO_NATIVE(id->match_flags);
77509 id->idVendor = TO_NATIVE(id->idVendor);
77510 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
77511 for (i = 0; i < count; i++) {
77512 const char *id = (char *)devs[i].id;
77513 char acpi_id[sizeof(devs[0].id)];
77514 - int j;
77515 + unsigned int j;
77516
77517 buf_printf(&mod->dev_table_buf,
77518 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77519 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77520
77521 for (j = 0; j < PNP_MAX_DEVICES; j++) {
77522 const char *id = (char *)card->devs[j].id;
77523 - int i2, j2;
77524 + unsigned int i2, j2;
77525 int dup = 0;
77526
77527 if (!id[0])
77528 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77529 /* add an individual alias for every device entry */
77530 if (!dup) {
77531 char acpi_id[sizeof(card->devs[0].id)];
77532 - int k;
77533 + unsigned int k;
77534
77535 buf_printf(&mod->dev_table_buf,
77536 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77537 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
77538 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
77539 char *alias)
77540 {
77541 - int i, j;
77542 + unsigned int i, j;
77543
77544 sprintf(alias, "dmi*");
77545
77546 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
77547 index 2bd594e..d43245e 100644
77548 --- a/scripts/mod/modpost.c
77549 +++ b/scripts/mod/modpost.c
77550 @@ -919,6 +919,7 @@ enum mismatch {
77551 ANY_INIT_TO_ANY_EXIT,
77552 ANY_EXIT_TO_ANY_INIT,
77553 EXPORT_TO_INIT_EXIT,
77554 + DATA_TO_TEXT
77555 };
77556
77557 struct sectioncheck {
77558 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
77559 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
77560 .mismatch = EXPORT_TO_INIT_EXIT,
77561 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
77562 +},
77563 +/* Do not reference code from writable data */
77564 +{
77565 + .fromsec = { DATA_SECTIONS, NULL },
77566 + .tosec = { TEXT_SECTIONS, NULL },
77567 + .mismatch = DATA_TO_TEXT
77568 }
77569 };
77570
77571 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
77572 continue;
77573 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
77574 continue;
77575 - if (sym->st_value == addr)
77576 - return sym;
77577 /* Find a symbol nearby - addr are maybe negative */
77578 d = sym->st_value - addr;
77579 + if (d == 0)
77580 + return sym;
77581 if (d < 0)
77582 d = addr - sym->st_value;
77583 if (d < distance) {
77584 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
77585 tosym, prl_to, prl_to, tosym);
77586 free(prl_to);
77587 break;
77588 + case DATA_TO_TEXT:
77589 +/*
77590 + fprintf(stderr,
77591 + "The variable %s references\n"
77592 + "the %s %s%s%s\n",
77593 + fromsym, to, sec2annotation(tosec), tosym, to_p);
77594 +*/
77595 + break;
77596 }
77597 fprintf(stderr, "\n");
77598 }
77599 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
77600 static void check_sec_ref(struct module *mod, const char *modname,
77601 struct elf_info *elf)
77602 {
77603 - int i;
77604 + unsigned int i;
77605 Elf_Shdr *sechdrs = elf->sechdrs;
77606
77607 /* Walk through all sections */
77608 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
77609 va_end(ap);
77610 }
77611
77612 -void buf_write(struct buffer *buf, const char *s, int len)
77613 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
77614 {
77615 if (buf->size - buf->pos < len) {
77616 buf->size += len + SZ;
77617 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
77618 if (fstat(fileno(file), &st) < 0)
77619 goto close_write;
77620
77621 - if (st.st_size != b->pos)
77622 + if (st.st_size != (off_t)b->pos)
77623 goto close_write;
77624
77625 tmp = NOFAIL(malloc(b->pos));
77626 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
77627 index 2031119..b5433af 100644
77628 --- a/scripts/mod/modpost.h
77629 +++ b/scripts/mod/modpost.h
77630 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
77631
77632 struct buffer {
77633 char *p;
77634 - int pos;
77635 - int size;
77636 + unsigned int pos;
77637 + unsigned int size;
77638 };
77639
77640 void __attribute__((format(printf, 2, 3)))
77641 buf_printf(struct buffer *buf, const char *fmt, ...);
77642
77643 void
77644 -buf_write(struct buffer *buf, const char *s, int len);
77645 +buf_write(struct buffer *buf, const char *s, unsigned int len);
77646
77647 struct module {
77648 struct module *next;
77649 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
77650 index 9dfcd6d..099068e 100644
77651 --- a/scripts/mod/sumversion.c
77652 +++ b/scripts/mod/sumversion.c
77653 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
77654 goto out;
77655 }
77656
77657 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
77658 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
77659 warn("writing sum in %s failed: %s\n",
77660 filename, strerror(errno));
77661 goto out;
77662 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
77663 index 5c11312..72742b5 100644
77664 --- a/scripts/pnmtologo.c
77665 +++ b/scripts/pnmtologo.c
77666 @@ -237,14 +237,14 @@ static void write_header(void)
77667 fprintf(out, " * Linux logo %s\n", logoname);
77668 fputs(" */\n\n", out);
77669 fputs("#include <linux/linux_logo.h>\n\n", out);
77670 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
77671 + fprintf(out, "static unsigned char %s_data[] = {\n",
77672 logoname);
77673 }
77674
77675 static void write_footer(void)
77676 {
77677 fputs("\n};\n\n", out);
77678 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
77679 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
77680 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
77681 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
77682 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
77683 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
77684 fputs("\n};\n\n", out);
77685
77686 /* write logo clut */
77687 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
77688 + fprintf(out, "static unsigned char %s_clut[] = {\n",
77689 logoname);
77690 write_hex_cnt = 0;
77691 for (i = 0; i < logo_clutsize; i++) {
77692 diff --git a/security/Kconfig b/security/Kconfig
77693 index 51bd5a0..3a4ebd0 100644
77694 --- a/security/Kconfig
77695 +++ b/security/Kconfig
77696 @@ -4,6 +4,627 @@
77697
77698 menu "Security options"
77699
77700 +source grsecurity/Kconfig
77701 +
77702 +menu "PaX"
77703 +
77704 + config ARCH_TRACK_EXEC_LIMIT
77705 + bool
77706 +
77707 + config PAX_KERNEXEC_PLUGIN
77708 + bool
77709 +
77710 + config PAX_PER_CPU_PGD
77711 + bool
77712 +
77713 + config TASK_SIZE_MAX_SHIFT
77714 + int
77715 + depends on X86_64
77716 + default 47 if !PAX_PER_CPU_PGD
77717 + default 42 if PAX_PER_CPU_PGD
77718 +
77719 + config PAX_ENABLE_PAE
77720 + bool
77721 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
77722 +
77723 +config PAX
77724 + bool "Enable various PaX features"
77725 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
77726 + help
77727 + This allows you to enable various PaX features. PaX adds
77728 + intrusion prevention mechanisms to the kernel that reduce
77729 + the risks posed by exploitable memory corruption bugs.
77730 +
77731 +menu "PaX Control"
77732 + depends on PAX
77733 +
77734 +config PAX_SOFTMODE
77735 + bool 'Support soft mode'
77736 + help
77737 + Enabling this option will allow you to run PaX in soft mode, that
77738 + is, PaX features will not be enforced by default, only on executables
77739 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
77740 + support as they are the only way to mark executables for soft mode use.
77741 +
77742 + Soft mode can be activated by using the "pax_softmode=1" kernel command
77743 + line option on boot. Furthermore you can control various PaX features
77744 + at runtime via the entries in /proc/sys/kernel/pax.
77745 +
77746 +config PAX_EI_PAX
77747 + bool 'Use legacy ELF header marking'
77748 + help
77749 + Enabling this option will allow you to control PaX features on
77750 + a per executable basis via the 'chpax' utility available at
77751 + http://pax.grsecurity.net/. The control flags will be read from
77752 + an otherwise reserved part of the ELF header. This marking has
77753 + numerous drawbacks (no support for soft-mode, toolchain does not
77754 + know about the non-standard use of the ELF header) therefore it
77755 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
77756 + support.
77757 +
77758 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77759 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
77760 + option otherwise they will not get any protection.
77761 +
77762 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
77763 + support as well, they will override the legacy EI_PAX marks.
77764 +
77765 +config PAX_PT_PAX_FLAGS
77766 + bool 'Use ELF program header marking'
77767 + help
77768 + Enabling this option will allow you to control PaX features on
77769 + a per executable basis via the 'paxctl' utility available at
77770 + http://pax.grsecurity.net/. The control flags will be read from
77771 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
77772 + has the benefits of supporting both soft mode and being fully
77773 + integrated into the toolchain (the binutils patch is available
77774 + from http://pax.grsecurity.net).
77775 +
77776 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77777 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77778 + support otherwise they will not get any protection.
77779 +
77780 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77781 + must make sure that the marks are the same if a binary has both marks.
77782 +
77783 + Note that if you enable the legacy EI_PAX marking support as well,
77784 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
77785 +
77786 +config PAX_XATTR_PAX_FLAGS
77787 + bool 'Use filesystem extended attributes marking'
77788 + depends on EXPERT
77789 + select CIFS_XATTR if CIFS
77790 + select EXT2_FS_XATTR if EXT2_FS
77791 + select EXT3_FS_XATTR if EXT3_FS
77792 + select EXT4_FS_XATTR if EXT4_FS
77793 + select JFFS2_FS_XATTR if JFFS2_FS
77794 + select REISERFS_FS_XATTR if REISERFS_FS
77795 + select SQUASHFS_XATTR if SQUASHFS
77796 + select TMPFS_XATTR if TMPFS
77797 + select UBIFS_FS_XATTR if UBIFS_FS
77798 + help
77799 + Enabling this option will allow you to control PaX features on
77800 + a per executable basis via the 'setfattr' utility. The control
77801 + flags will be read from the user.pax.flags extended attribute of
77802 + the file. This marking has the benefit of supporting binary-only
77803 + applications that self-check themselves (e.g., skype) and would
77804 + not tolerate chpax/paxctl changes. The main drawback is that
77805 + extended attributes are not supported by some filesystems (e.g.,
77806 + isofs, udf, vfat) so copying files through such filesystems will
77807 + lose the extended attributes and these PaX markings.
77808 +
77809 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77810 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77811 + support otherwise they will not get any protection.
77812 +
77813 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77814 + must make sure that the marks are the same if a binary has both marks.
77815 +
77816 + Note that if you enable the legacy EI_PAX marking support as well,
77817 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
77818 +
77819 +choice
77820 + prompt 'MAC system integration'
77821 + default PAX_HAVE_ACL_FLAGS
77822 + help
77823 + Mandatory Access Control systems have the option of controlling
77824 + PaX flags on a per executable basis, choose the method supported
77825 + by your particular system.
77826 +
77827 + - "none": if your MAC system does not interact with PaX,
77828 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
77829 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
77830 +
77831 + NOTE: this option is for developers/integrators only.
77832 +
77833 + config PAX_NO_ACL_FLAGS
77834 + bool 'none'
77835 +
77836 + config PAX_HAVE_ACL_FLAGS
77837 + bool 'direct'
77838 +
77839 + config PAX_HOOK_ACL_FLAGS
77840 + bool 'hook'
77841 +endchoice
77842 +
77843 +endmenu
77844 +
77845 +menu "Non-executable pages"
77846 + depends on PAX
77847 +
77848 +config PAX_NOEXEC
77849 + bool "Enforce non-executable pages"
77850 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
77851 + help
77852 + By design some architectures do not allow for protecting memory
77853 + pages against execution or even if they do, Linux does not make
77854 + use of this feature. In practice this means that if a page is
77855 + readable (such as the stack or heap) it is also executable.
77856 +
77857 + There is a well known exploit technique that makes use of this
77858 + fact and a common programming mistake where an attacker can
77859 + introduce code of his choice somewhere in the attacked program's
77860 + memory (typically the stack or the heap) and then execute it.
77861 +
77862 + If the attacked program was running with different (typically
77863 + higher) privileges than that of the attacker, then he can elevate
77864 + his own privilege level (e.g. get a root shell, write to files for
77865 + which he does not have write access to, etc).
77866 +
77867 + Enabling this option will let you choose from various features
77868 + that prevent the injection and execution of 'foreign' code in
77869 + a program.
77870 +
77871 + This will also break programs that rely on the old behaviour and
77872 + expect that dynamically allocated memory via the malloc() family
77873 + of functions is executable (which it is not). Notable examples
77874 + are the XFree86 4.x server, the java runtime and wine.
77875 +
77876 +config PAX_PAGEEXEC
77877 + bool "Paging based non-executable pages"
77878 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
77879 + select S390_SWITCH_AMODE if S390
77880 + select S390_EXEC_PROTECT if S390
77881 + select ARCH_TRACK_EXEC_LIMIT if X86_32
77882 + help
77883 + This implementation is based on the paging feature of the CPU.
77884 + On i386 without hardware non-executable bit support there is a
77885 + variable but usually low performance impact, however on Intel's
77886 + P4 core based CPUs it is very high so you should not enable this
77887 + for kernels meant to be used on such CPUs.
77888 +
77889 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
77890 + with hardware non-executable bit support there is no performance
77891 + impact, on ppc the impact is negligible.
77892 +
77893 + Note that several architectures require various emulations due to
77894 + badly designed userland ABIs, this will cause a performance impact
77895 + but will disappear as soon as userland is fixed. For example, ppc
77896 + userland MUST have been built with secure-plt by a recent toolchain.
77897 +
77898 +config PAX_SEGMEXEC
77899 + bool "Segmentation based non-executable pages"
77900 + depends on PAX_NOEXEC && X86_32
77901 + help
77902 + This implementation is based on the segmentation feature of the
77903 + CPU and has a very small performance impact, however applications
77904 + will be limited to a 1.5 GB address space instead of the normal
77905 + 3 GB.
77906 +
77907 +config PAX_EMUTRAMP
77908 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
77909 + default y if PARISC
77910 + help
77911 + There are some programs and libraries that for one reason or
77912 + another attempt to execute special small code snippets from
77913 + non-executable memory pages. Most notable examples are the
77914 + signal handler return code generated by the kernel itself and
77915 + the GCC trampolines.
77916 +
77917 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
77918 + such programs will no longer work under your kernel.
77919 +
77920 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
77921 + utilities to enable trampoline emulation for the affected programs
77922 + yet still have the protection provided by the non-executable pages.
77923 +
77924 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
77925 + your system will not even boot.
77926 +
77927 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
77928 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
77929 + for the affected files.
77930 +
77931 + NOTE: enabling this feature *may* open up a loophole in the
77932 + protection provided by non-executable pages that an attacker
77933 + could abuse. Therefore the best solution is to not have any
77934 + files on your system that would require this option. This can
77935 + be achieved by not using libc5 (which relies on the kernel
77936 + signal handler return code) and not using or rewriting programs
77937 + that make use of the nested function implementation of GCC.
77938 + Skilled users can just fix GCC itself so that it implements
77939 + nested function calls in a way that does not interfere with PaX.
77940 +
77941 +config PAX_EMUSIGRT
77942 + bool "Automatically emulate sigreturn trampolines"
77943 + depends on PAX_EMUTRAMP && PARISC
77944 + default y
77945 + help
77946 + Enabling this option will have the kernel automatically detect
77947 + and emulate signal return trampolines executing on the stack
77948 + that would otherwise lead to task termination.
77949 +
77950 + This solution is intended as a temporary one for users with
77951 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
77952 + Modula-3 runtime, etc) or executables linked to such, basically
77953 + everything that does not specify its own SA_RESTORER function in
77954 + normal executable memory like glibc 2.1+ does.
77955 +
77956 + On parisc you MUST enable this option, otherwise your system will
77957 + not even boot.
77958 +
77959 + NOTE: this feature cannot be disabled on a per executable basis
77960 + and since it *does* open up a loophole in the protection provided
77961 + by non-executable pages, the best solution is to not have any
77962 + files on your system that would require this option.
77963 +
77964 +config PAX_MPROTECT
77965 + bool "Restrict mprotect()"
77966 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
77967 + help
77968 + Enabling this option will prevent programs from
77969 + - changing the executable status of memory pages that were
77970 + not originally created as executable,
77971 + - making read-only executable pages writable again,
77972 + - creating executable pages from anonymous memory,
77973 + - making read-only-after-relocations (RELRO) data pages writable again.
77974 +
77975 + You should say Y here to complete the protection provided by
77976 + the enforcement of non-executable pages.
77977 +
77978 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
77979 + this feature on a per file basis.
77980 +
77981 +config PAX_MPROTECT_COMPAT
77982 + bool "Use legacy/compat protection demoting (read help)"
77983 + depends on PAX_MPROTECT
77984 + default n
77985 + help
77986 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
77987 + by sending the proper error code to the application. For some broken
77988 + userland, this can cause problems with Python or other applications. The
77989 + current implementation however allows for applications like clamav to
77990 + detect if JIT compilation/execution is allowed and to fall back gracefully
77991 + to an interpreter-based mode if it does not. While we encourage everyone
77992 + to use the current implementation as-is and push upstream to fix broken
77993 + userland (note that the RWX logging option can assist with this), in some
77994 + environments this may not be possible. Having to disable MPROTECT
77995 + completely on certain binaries reduces the security benefit of PaX,
77996 + so this option is provided for those environments to revert to the old
77997 + behavior.
77998 +
77999 +config PAX_ELFRELOCS
78000 + bool "Allow ELF text relocations (read help)"
78001 + depends on PAX_MPROTECT
78002 + default n
78003 + help
78004 + Non-executable pages and mprotect() restrictions are effective
78005 + in preventing the introduction of new executable code into an
78006 + attacked task's address space. There remain only two venues
78007 + for this kind of attack: if the attacker can execute already
78008 + existing code in the attacked task then he can either have it
78009 + create and mmap() a file containing his code or have it mmap()
78010 + an already existing ELF library that does not have position
78011 + independent code in it and use mprotect() on it to make it
78012 + writable and copy his code there. While protecting against
78013 + the former approach is beyond PaX, the latter can be prevented
78014 + by having only PIC ELF libraries on one's system (which do not
78015 + need to relocate their code). If you are sure this is your case,
78016 + as is the case with all modern Linux distributions, then leave
78017 + this option disabled. You should say 'n' here.
78018 +
78019 +config PAX_ETEXECRELOCS
78020 + bool "Allow ELF ET_EXEC text relocations"
78021 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
78022 + select PAX_ELFRELOCS
78023 + default y
78024 + help
78025 + On some architectures there are incorrectly created applications
78026 + that require text relocations and would not work without enabling
78027 + this option. If you are an alpha, ia64 or parisc user, you should
78028 + enable this option and disable it once you have made sure that
78029 + none of your applications need it.
78030 +
78031 +config PAX_EMUPLT
78032 + bool "Automatically emulate ELF PLT"
78033 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
78034 + default y
78035 + help
78036 + Enabling this option will have the kernel automatically detect
78037 + and emulate the Procedure Linkage Table entries in ELF files.
78038 + On some architectures such entries are in writable memory, and
78039 + become non-executable leading to task termination. Therefore
78040 + it is mandatory that you enable this option on alpha, parisc,
78041 + sparc and sparc64, otherwise your system would not even boot.
78042 +
78043 + NOTE: this feature *does* open up a loophole in the protection
78044 + provided by the non-executable pages, therefore the proper
78045 + solution is to modify the toolchain to produce a PLT that does
78046 + not need to be writable.
78047 +
78048 +config PAX_DLRESOLVE
78049 + bool 'Emulate old glibc resolver stub'
78050 + depends on PAX_EMUPLT && SPARC
78051 + default n
78052 + help
78053 + This option is needed if userland has an old glibc (before 2.4)
78054 + that puts a 'save' instruction into the runtime generated resolver
78055 + stub that needs special emulation.
78056 +
78057 +config PAX_KERNEXEC
78058 + bool "Enforce non-executable kernel pages"
78059 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
78060 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
78061 + select PAX_KERNEXEC_PLUGIN if X86_64
78062 + help
78063 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
78064 + that is, enabling this option will make it harder to inject
78065 + and execute 'foreign' code in kernel memory itself.
78066 +
78067 + Note that on x86_64 kernels there is a known regression when
78068 + this feature and KVM/VMX are both enabled in the host kernel.
78069 +
78070 +choice
78071 + prompt "Return Address Instrumentation Method"
78072 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
78073 + depends on PAX_KERNEXEC_PLUGIN
78074 + help
78075 + Select the method used to instrument function pointer dereferences.
78076 + Note that binary modules cannot be instrumented by this approach.
78077 +
78078 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
78079 + bool "bts"
78080 + help
78081 + This method is compatible with binary only modules but has
78082 + a higher runtime overhead.
78083 +
78084 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
78085 + bool "or"
78086 + depends on !PARAVIRT
78087 + help
78088 + This method is incompatible with binary only modules but has
78089 + a lower runtime overhead.
78090 +endchoice
78091 +
78092 +config PAX_KERNEXEC_PLUGIN_METHOD
78093 + string
78094 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
78095 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
78096 + default ""
78097 +
78098 +config PAX_KERNEXEC_MODULE_TEXT
78099 + int "Minimum amount of memory reserved for module code"
78100 + default "4"
78101 + depends on PAX_KERNEXEC && X86_32 && MODULES
78102 + help
78103 + Due to implementation details the kernel must reserve a fixed
78104 + amount of memory for module code at compile time that cannot be
78105 + changed at runtime. Here you can specify the minimum amount
78106 + in MB that will be reserved. Due to the same implementation
78107 + details this size will always be rounded up to the next 2/4 MB
78108 + boundary (depends on PAE) so the actually available memory for
78109 + module code will usually be more than this minimum.
78110 +
78111 + The default 4 MB should be enough for most users but if you have
78112 + an excessive number of modules (e.g., most distribution configs
78113 + compile many drivers as modules) or use huge modules such as
78114 + nvidia's kernel driver, you will need to adjust this amount.
78115 + A good rule of thumb is to look at your currently loaded kernel
78116 + modules and add up their sizes.
78117 +
78118 +endmenu
78119 +
78120 +menu "Address Space Layout Randomization"
78121 + depends on PAX
78122 +
78123 +config PAX_ASLR
78124 + bool "Address Space Layout Randomization"
78125 + help
78126 + Many if not most exploit techniques rely on the knowledge of
78127 + certain addresses in the attacked program. The following options
78128 + will allow the kernel to apply a certain amount of randomization
78129 + to specific parts of the program thereby forcing an attacker to
78130 + guess them in most cases. Any failed guess will most likely crash
78131 + the attacked program which allows the kernel to detect such attempts
78132 + and react on them. PaX itself provides no reaction mechanisms,
78133 + instead it is strongly encouraged that you make use of Nergal's
78134 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
78135 + (http://www.grsecurity.net/) built-in crash detection features or
78136 + develop one yourself.
78137 +
78138 + By saying Y here you can choose to randomize the following areas:
78139 + - top of the task's kernel stack
78140 + - top of the task's userland stack
78141 + - base address for mmap() requests that do not specify one
78142 + (this includes all libraries)
78143 + - base address of the main executable
78144 +
78145 + It is strongly recommended to say Y here as address space layout
78146 + randomization has negligible impact on performance yet it provides
78147 + a very effective protection.
78148 +
78149 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78150 + this feature on a per file basis.
78151 +
78152 +config PAX_RANDKSTACK
78153 + bool "Randomize kernel stack base"
78154 + depends on X86_TSC && X86
78155 + help
78156 + By saying Y here the kernel will randomize every task's kernel
78157 + stack on every system call. This will not only force an attacker
78158 + to guess it but also prevent him from making use of possible
78159 + leaked information about it.
78160 +
78161 + Since the kernel stack is a rather scarce resource, randomization
78162 + may cause unexpected stack overflows, therefore you should very
78163 + carefully test your system. Note that once enabled in the kernel
78164 + configuration, this feature cannot be disabled on a per file basis.
78165 +
78166 +config PAX_RANDUSTACK
78167 + bool "Randomize user stack base"
78168 + depends on PAX_ASLR
78169 + help
78170 + By saying Y here the kernel will randomize every task's userland
78171 + stack. The randomization is done in two steps where the second
78172 + one may apply a big amount of shift to the top of the stack and
78173 + cause problems for programs that want to use lots of memory (more
78174 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
78175 + For this reason the second step can be controlled by 'chpax' or
78176 + 'paxctl' on a per file basis.
78177 +
78178 +config PAX_RANDMMAP
78179 + bool "Randomize mmap() base"
78180 + depends on PAX_ASLR
78181 + help
78182 + By saying Y here the kernel will use a randomized base address for
78183 + mmap() requests that do not specify one themselves. As a result
78184 + all dynamically loaded libraries will appear at random addresses
78185 + and therefore be harder to exploit by a technique where an attacker
78186 + attempts to execute library code for his purposes (e.g. spawn a
78187 + shell from an exploited program that is running at an elevated
78188 + privilege level).
78189 +
78190 + Furthermore, if a program is relinked as a dynamic ELF file, its
78191 + base address will be randomized as well, completing the full
78192 + randomization of the address space layout. Attacking such programs
78193 + becomes a guess game. You can find an example of doing this at
78194 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
78195 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
78196 +
78197 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
78198 + feature on a per file basis.
78199 +
78200 +endmenu
78201 +
78202 +menu "Miscellaneous hardening features"
78203 +
78204 +config PAX_MEMORY_SANITIZE
78205 + bool "Sanitize all freed memory"
78206 + depends on !HIBERNATION
78207 + help
78208 + By saying Y here the kernel will erase memory pages as soon as they
78209 + are freed. This in turn reduces the lifetime of data stored in the
78210 + pages, making it less likely that sensitive information such as
78211 + passwords, cryptographic secrets, etc stay in memory for too long.
78212 +
78213 + This is especially useful for programs whose runtime is short, long
78214 + lived processes and the kernel itself benefit from this as long as
78215 + they operate on whole memory pages and ensure timely freeing of pages
78216 + that may hold sensitive information.
78217 +
78218 + The tradeoff is performance impact, on a single CPU system kernel
78219 + compilation sees a 3% slowdown, other systems and workloads may vary
78220 + and you are advised to test this feature on your expected workload
78221 + before deploying it.
78222 +
78223 + Note that this feature does not protect data stored in live pages,
78224 + e.g., process memory swapped to disk may stay there for a long time.
78225 +
78226 +config PAX_MEMORY_STACKLEAK
78227 + bool "Sanitize kernel stack"
78228 + depends on X86
78229 + help
78230 + By saying Y here the kernel will erase the kernel stack before it
78231 + returns from a system call. This in turn reduces the information
78232 + that a kernel stack leak bug can reveal.
78233 +
78234 + Note that such a bug can still leak information that was put on
78235 + the stack by the current system call (the one eventually triggering
78236 + the bug) but traces of earlier system calls on the kernel stack
78237 + cannot leak anymore.
78238 +
78239 + The tradeoff is performance impact: on a single CPU system kernel
78240 + compilation sees a 1% slowdown, other systems and workloads may vary
78241 + and you are advised to test this feature on your expected workload
78242 + before deploying it.
78243 +
78244 + Note: full support for this feature requires gcc with plugin support
78245 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
78246 + versions means that functions with large enough stack frames may
78247 + leave uninitialized memory behind that may be exposed to a later
78248 + syscall leaking the stack.
78249 +
78250 +config PAX_MEMORY_UDEREF
78251 + bool "Prevent invalid userland pointer dereference"
78252 + depends on X86 && !UML_X86 && !XEN
78253 + select PAX_PER_CPU_PGD if X86_64
78254 + help
78255 + By saying Y here the kernel will be prevented from dereferencing
78256 + userland pointers in contexts where the kernel expects only kernel
78257 + pointers. This is both a useful runtime debugging feature and a
78258 + security measure that prevents exploiting a class of kernel bugs.
78259 +
78260 + The tradeoff is that some virtualization solutions may experience
78261 + a huge slowdown and therefore you should not enable this feature
78262 + for kernels meant to run in such environments. Whether a given VM
78263 + solution is affected or not is best determined by simply trying it
78264 + out, the performance impact will be obvious right on boot as this
78265 + mechanism engages from very early on. A good rule of thumb is that
78266 + VMs running on CPUs without hardware virtualization support (i.e.,
78267 + the majority of IA-32 CPUs) will likely experience the slowdown.
78268 +
78269 +config PAX_REFCOUNT
78270 + bool "Prevent various kernel object reference counter overflows"
78271 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
78272 + help
78273 + By saying Y here the kernel will detect and prevent overflowing
78274 + various (but not all) kinds of object reference counters. Such
78275 + overflows can normally occur due to bugs only and are often, if
78276 + not always, exploitable.
78277 +
78278 + The tradeoff is that data structures protected by an overflowed
78279 + refcount will never be freed and therefore will leak memory. Note
78280 + that this leak also happens even without this protection but in
78281 + that case the overflow can eventually trigger the freeing of the
78282 + data structure while it is still being used elsewhere, resulting
78283 + in the exploitable situation that this feature prevents.
78284 +
78285 + Since this has a negligible performance impact, you should enable
78286 + this feature.
78287 +
78288 +config PAX_USERCOPY
78289 + bool "Harden heap object copies between kernel and userland"
78290 + depends on X86 || PPC || SPARC || ARM
78291 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
78292 + help
78293 + By saying Y here the kernel will enforce the size of heap objects
78294 + when they are copied in either direction between the kernel and
78295 + userland, even if only a part of the heap object is copied.
78296 +
78297 + Specifically, this checking prevents information leaking from the
78298 + kernel heap during kernel to userland copies (if the kernel heap
78299 + object is otherwise fully initialized) and prevents kernel heap
78300 + overflows during userland to kernel copies.
78301 +
78302 + Note that the current implementation provides the strictest bounds
78303 + checks for the SLUB allocator.
78304 +
78305 + Enabling this option also enables per-slab cache protection against
78306 + data in a given cache being copied into/out of via userland
78307 + accessors. Though the whitelist of regions will be reduced over
78308 + time, it notably protects important data structures like task structs.
78309 +
78310 + If frame pointers are enabled on x86, this option will also restrict
78311 + copies into and out of the kernel stack to local variables within a
78312 + single frame.
78313 +
78314 + Since this has a negligible performance impact, you should enable
78315 + this feature.
78316 +
78317 +endmenu
78318 +
78319 +endmenu
78320 +
78321 config KEYS
78322 bool "Enable access key retention support"
78323 help
78324 @@ -169,7 +790,7 @@ config INTEL_TXT
78325 config LSM_MMAP_MIN_ADDR
78326 int "Low address space for LSM to protect from user allocation"
78327 depends on SECURITY && SECURITY_SELINUX
78328 - default 32768 if ARM
78329 + default 32768 if ALPHA || ARM || PARISC || SPARC32
78330 default 65536
78331 help
78332 This is the portion of low virtual memory which should be protected
78333 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
78334 index 3783202..1852837 100644
78335 --- a/security/apparmor/lsm.c
78336 +++ b/security/apparmor/lsm.c
78337 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
78338 return error;
78339 }
78340
78341 -static struct security_operations apparmor_ops = {
78342 +static struct security_operations apparmor_ops __read_only = {
78343 .name = "apparmor",
78344
78345 .ptrace_access_check = apparmor_ptrace_access_check,
78346 diff --git a/security/commoncap.c b/security/commoncap.c
78347 index ee4f848..a320c64 100644
78348 --- a/security/commoncap.c
78349 +++ b/security/commoncap.c
78350 @@ -28,6 +28,7 @@
78351 #include <linux/prctl.h>
78352 #include <linux/securebits.h>
78353 #include <linux/user_namespace.h>
78354 +#include <net/sock.h>
78355
78356 /*
78357 * If a non-root user executes a setuid-root binary in
78358 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
78359
78360 int cap_netlink_recv(struct sk_buff *skb, int cap)
78361 {
78362 - if (!cap_raised(current_cap(), cap))
78363 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
78364 return -EPERM;
78365 return 0;
78366 }
78367 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
78368 {
78369 const struct cred *cred = current_cred();
78370
78371 + if (gr_acl_enable_at_secure())
78372 + return 1;
78373 +
78374 if (cred->uid != 0) {
78375 if (bprm->cap_effective)
78376 return 1;
78377 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
78378 index 3ccf7ac..d73ad64 100644
78379 --- a/security/integrity/ima/ima.h
78380 +++ b/security/integrity/ima/ima.h
78381 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78382 extern spinlock_t ima_queue_lock;
78383
78384 struct ima_h_table {
78385 - atomic_long_t len; /* number of stored measurements in the list */
78386 - atomic_long_t violations;
78387 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
78388 + atomic_long_unchecked_t violations;
78389 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
78390 };
78391 extern struct ima_h_table ima_htable;
78392 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
78393 index 88a2788..581ab92 100644
78394 --- a/security/integrity/ima/ima_api.c
78395 +++ b/security/integrity/ima/ima_api.c
78396 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78397 int result;
78398
78399 /* can overflow, only indicator */
78400 - atomic_long_inc(&ima_htable.violations);
78401 + atomic_long_inc_unchecked(&ima_htable.violations);
78402
78403 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
78404 if (!entry) {
78405 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
78406 index c5c5a72..2ad942f 100644
78407 --- a/security/integrity/ima/ima_audit.c
78408 +++ b/security/integrity/ima/ima_audit.c
78409 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
78410 audit_log_format(ab, " name=");
78411 audit_log_untrustedstring(ab, fname);
78412 }
78413 - if (inode)
78414 - audit_log_format(ab, " dev=%s ino=%lu",
78415 - inode->i_sb->s_id, inode->i_ino);
78416 + if (inode) {
78417 + audit_log_format(ab, " dev=");
78418 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
78419 + audit_log_format(ab, " ino=%lu", inode->i_ino);
78420 + }
78421 audit_log_format(ab, " res=%d", !result ? 0 : 1);
78422 audit_log_end(ab);
78423 }
78424 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
78425 index e1aa2b4..52027bf 100644
78426 --- a/security/integrity/ima/ima_fs.c
78427 +++ b/security/integrity/ima/ima_fs.c
78428 @@ -28,12 +28,12 @@
78429 static int valid_policy = 1;
78430 #define TMPBUFLEN 12
78431 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
78432 - loff_t *ppos, atomic_long_t *val)
78433 + loff_t *ppos, atomic_long_unchecked_t *val)
78434 {
78435 char tmpbuf[TMPBUFLEN];
78436 ssize_t len;
78437
78438 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
78439 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
78440 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
78441 }
78442
78443 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
78444 index 55a6271..ad829c3 100644
78445 --- a/security/integrity/ima/ima_queue.c
78446 +++ b/security/integrity/ima/ima_queue.c
78447 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
78448 INIT_LIST_HEAD(&qe->later);
78449 list_add_tail_rcu(&qe->later, &ima_measurements);
78450
78451 - atomic_long_inc(&ima_htable.len);
78452 + atomic_long_inc_unchecked(&ima_htable.len);
78453 key = ima_hash_key(entry->digest);
78454 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
78455 return 0;
78456 diff --git a/security/keys/compat.c b/security/keys/compat.c
78457 index 4c48e13..7abdac9 100644
78458 --- a/security/keys/compat.c
78459 +++ b/security/keys/compat.c
78460 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
78461 if (ret == 0)
78462 goto no_payload_free;
78463
78464 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78465 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78466
78467 if (iov != iovstack)
78468 kfree(iov);
78469 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
78470 index 0b3f5d7..892c8a6 100644
78471 --- a/security/keys/keyctl.c
78472 +++ b/security/keys/keyctl.c
78473 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
78474 /*
78475 * Copy the iovec data from userspace
78476 */
78477 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78478 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
78479 unsigned ioc)
78480 {
78481 for (; ioc > 0; ioc--) {
78482 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78483 * If successful, 0 will be returned.
78484 */
78485 long keyctl_instantiate_key_common(key_serial_t id,
78486 - const struct iovec *payload_iov,
78487 + const struct iovec __user *payload_iov,
78488 unsigned ioc,
78489 size_t plen,
78490 key_serial_t ringid)
78491 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
78492 [0].iov_len = plen
78493 };
78494
78495 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
78496 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
78497 }
78498
78499 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
78500 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
78501 if (ret == 0)
78502 goto no_payload_free;
78503
78504 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78505 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78506
78507 if (iov != iovstack)
78508 kfree(iov);
78509 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
78510 index 37a7f3b..86dc19f 100644
78511 --- a/security/keys/keyring.c
78512 +++ b/security/keys/keyring.c
78513 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
78514 ret = -EFAULT;
78515
78516 for (loop = 0; loop < klist->nkeys; loop++) {
78517 + key_serial_t serial;
78518 key = klist->keys[loop];
78519 + serial = key->serial;
78520
78521 tmp = sizeof(key_serial_t);
78522 if (tmp > buflen)
78523 tmp = buflen;
78524
78525 - if (copy_to_user(buffer,
78526 - &key->serial,
78527 - tmp) != 0)
78528 + if (copy_to_user(buffer, &serial, tmp))
78529 goto error;
78530
78531 buflen -= tmp;
78532 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
78533 index 893af8a..ba9237c 100644
78534 --- a/security/lsm_audit.c
78535 +++ b/security/lsm_audit.c
78536 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
78537 audit_log_d_path(ab, "path=", &a->u.path);
78538
78539 inode = a->u.path.dentry->d_inode;
78540 - if (inode)
78541 - audit_log_format(ab, " dev=%s ino=%lu",
78542 - inode->i_sb->s_id,
78543 - inode->i_ino);
78544 + if (inode) {
78545 + audit_log_format(ab, " dev=");
78546 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
78547 + audit_log_format(ab, " ino=%lu", inode->i_ino);
78548 + }
78549 break;
78550 }
78551 case LSM_AUDIT_DATA_DENTRY: {
78552 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
78553 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
78554
78555 inode = a->u.dentry->d_inode;
78556 - if (inode)
78557 - audit_log_format(ab, " dev=%s ino=%lu",
78558 - inode->i_sb->s_id,
78559 - inode->i_ino);
78560 + if (inode) {
78561 + audit_log_format(ab, " dev=");
78562 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
78563 + audit_log_format(ab, " ino=%lu", inode->i_ino);
78564 + }
78565 break;
78566 }
78567 case LSM_AUDIT_DATA_INODE: {
78568 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
78569 dentry->d_name.name);
78570 dput(dentry);
78571 }
78572 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
78573 - inode->i_ino);
78574 + audit_log_format(ab, " dev=");
78575 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
78576 + audit_log_format(ab, " ino=%lu", inode->i_ino);
78577 break;
78578 }
78579 case LSM_AUDIT_DATA_TASK:
78580 diff --git a/security/min_addr.c b/security/min_addr.c
78581 index f728728..6457a0c 100644
78582 --- a/security/min_addr.c
78583 +++ b/security/min_addr.c
78584 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
78585 */
78586 static void update_mmap_min_addr(void)
78587 {
78588 +#ifndef SPARC
78589 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
78590 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
78591 mmap_min_addr = dac_mmap_min_addr;
78592 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
78593 #else
78594 mmap_min_addr = dac_mmap_min_addr;
78595 #endif
78596 +#endif
78597 }
78598
78599 /*
78600 diff --git a/security/security.c b/security/security.c
78601 index e2f684a..8d62ef5 100644
78602 --- a/security/security.c
78603 +++ b/security/security.c
78604 @@ -26,8 +26,8 @@
78605 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
78606 CONFIG_DEFAULT_SECURITY;
78607
78608 -static struct security_operations *security_ops;
78609 -static struct security_operations default_security_ops = {
78610 +static struct security_operations *security_ops __read_only;
78611 +static struct security_operations default_security_ops __read_only = {
78612 .name = "default",
78613 };
78614
78615 @@ -68,7 +68,9 @@ int __init security_init(void)
78616
78617 void reset_security_ops(void)
78618 {
78619 + pax_open_kernel();
78620 security_ops = &default_security_ops;
78621 + pax_close_kernel();
78622 }
78623
78624 /* Save user chosen LSM */
78625 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
78626 index 1126c10..effb32b 100644
78627 --- a/security/selinux/hooks.c
78628 +++ b/security/selinux/hooks.c
78629 @@ -94,8 +94,6 @@
78630
78631 #define NUM_SEL_MNT_OPTS 5
78632
78633 -extern struct security_operations *security_ops;
78634 -
78635 /* SECMARK reference count */
78636 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
78637
78638 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
78639
78640 #endif
78641
78642 -static struct security_operations selinux_ops = {
78643 +static struct security_operations selinux_ops __read_only = {
78644 .name = "selinux",
78645
78646 .ptrace_access_check = selinux_ptrace_access_check,
78647 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
78648 index b43813c..74be837 100644
78649 --- a/security/selinux/include/xfrm.h
78650 +++ b/security/selinux/include/xfrm.h
78651 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
78652
78653 static inline void selinux_xfrm_notify_policyload(void)
78654 {
78655 - atomic_inc(&flow_cache_genid);
78656 + atomic_inc_unchecked(&flow_cache_genid);
78657 }
78658 #else
78659 static inline int selinux_xfrm_enabled(void)
78660 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
78661 index 7db62b4..ee4d949 100644
78662 --- a/security/smack/smack_lsm.c
78663 +++ b/security/smack/smack_lsm.c
78664 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
78665 return 0;
78666 }
78667
78668 -struct security_operations smack_ops = {
78669 +struct security_operations smack_ops __read_only = {
78670 .name = "smack",
78671
78672 .ptrace_access_check = smack_ptrace_access_check,
78673 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
78674 index 4b327b6..646c57a 100644
78675 --- a/security/tomoyo/tomoyo.c
78676 +++ b/security/tomoyo/tomoyo.c
78677 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
78678 * tomoyo_security_ops is a "struct security_operations" which is used for
78679 * registering TOMOYO.
78680 */
78681 -static struct security_operations tomoyo_security_ops = {
78682 +static struct security_operations tomoyo_security_ops __read_only = {
78683 .name = "tomoyo",
78684 .cred_alloc_blank = tomoyo_cred_alloc_blank,
78685 .cred_prepare = tomoyo_cred_prepare,
78686 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
78687 index 762af68..7103453 100644
78688 --- a/sound/aoa/codecs/onyx.c
78689 +++ b/sound/aoa/codecs/onyx.c
78690 @@ -54,7 +54,7 @@ struct onyx {
78691 spdif_locked:1,
78692 analog_locked:1,
78693 original_mute:2;
78694 - int open_count;
78695 + local_t open_count;
78696 struct codec_info *codec_info;
78697
78698 /* mutex serializes concurrent access to the device
78699 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
78700 struct onyx *onyx = cii->codec_data;
78701
78702 mutex_lock(&onyx->mutex);
78703 - onyx->open_count++;
78704 + local_inc(&onyx->open_count);
78705 mutex_unlock(&onyx->mutex);
78706
78707 return 0;
78708 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
78709 struct onyx *onyx = cii->codec_data;
78710
78711 mutex_lock(&onyx->mutex);
78712 - onyx->open_count--;
78713 - if (!onyx->open_count)
78714 + if (local_dec_and_test(&onyx->open_count))
78715 onyx->spdif_locked = onyx->analog_locked = 0;
78716 mutex_unlock(&onyx->mutex);
78717
78718 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
78719 index ffd2025..df062c9 100644
78720 --- a/sound/aoa/codecs/onyx.h
78721 +++ b/sound/aoa/codecs/onyx.h
78722 @@ -11,6 +11,7 @@
78723 #include <linux/i2c.h>
78724 #include <asm/pmac_low_i2c.h>
78725 #include <asm/prom.h>
78726 +#include <asm/local.h>
78727
78728 /* PCM3052 register definitions */
78729
78730 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
78731 index 3cc4b86..af0a951 100644
78732 --- a/sound/core/oss/pcm_oss.c
78733 +++ b/sound/core/oss/pcm_oss.c
78734 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
78735 if (in_kernel) {
78736 mm_segment_t fs;
78737 fs = snd_enter_user();
78738 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78739 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78740 snd_leave_user(fs);
78741 } else {
78742 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78743 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78744 }
78745 if (ret != -EPIPE && ret != -ESTRPIPE)
78746 break;
78747 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
78748 if (in_kernel) {
78749 mm_segment_t fs;
78750 fs = snd_enter_user();
78751 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78752 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78753 snd_leave_user(fs);
78754 } else {
78755 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78756 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78757 }
78758 if (ret == -EPIPE) {
78759 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
78760 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
78761 struct snd_pcm_plugin_channel *channels;
78762 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
78763 if (!in_kernel) {
78764 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
78765 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
78766 return -EFAULT;
78767 buf = runtime->oss.buffer;
78768 }
78769 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
78770 }
78771 } else {
78772 tmp = snd_pcm_oss_write2(substream,
78773 - (const char __force *)buf,
78774 + (const char __force_kernel *)buf,
78775 runtime->oss.period_bytes, 0);
78776 if (tmp <= 0)
78777 goto err;
78778 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
78779 struct snd_pcm_runtime *runtime = substream->runtime;
78780 snd_pcm_sframes_t frames, frames1;
78781 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
78782 - char __user *final_dst = (char __force __user *)buf;
78783 + char __user *final_dst = (char __force_user *)buf;
78784 if (runtime->oss.plugin_first) {
78785 struct snd_pcm_plugin_channel *channels;
78786 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
78787 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
78788 xfer += tmp;
78789 runtime->oss.buffer_used -= tmp;
78790 } else {
78791 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
78792 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
78793 runtime->oss.period_bytes, 0);
78794 if (tmp <= 0)
78795 goto err;
78796 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
78797 size1);
78798 size1 /= runtime->channels; /* frames */
78799 fs = snd_enter_user();
78800 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
78801 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
78802 snd_leave_user(fs);
78803 }
78804 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
78805 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
78806 index 91cdf94..4085161 100644
78807 --- a/sound/core/pcm_compat.c
78808 +++ b/sound/core/pcm_compat.c
78809 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
78810 int err;
78811
78812 fs = snd_enter_user();
78813 - err = snd_pcm_delay(substream, &delay);
78814 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
78815 snd_leave_user(fs);
78816 if (err < 0)
78817 return err;
78818 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
78819 index 25ed9fe..24c46e9 100644
78820 --- a/sound/core/pcm_native.c
78821 +++ b/sound/core/pcm_native.c
78822 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
78823 switch (substream->stream) {
78824 case SNDRV_PCM_STREAM_PLAYBACK:
78825 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
78826 - (void __user *)arg);
78827 + (void __force_user *)arg);
78828 break;
78829 case SNDRV_PCM_STREAM_CAPTURE:
78830 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
78831 - (void __user *)arg);
78832 + (void __force_user *)arg);
78833 break;
78834 default:
78835 result = -EINVAL;
78836 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
78837 index 5cf8d65..912a79c 100644
78838 --- a/sound/core/seq/seq_device.c
78839 +++ b/sound/core/seq/seq_device.c
78840 @@ -64,7 +64,7 @@ struct ops_list {
78841 int argsize; /* argument size */
78842
78843 /* operators */
78844 - struct snd_seq_dev_ops ops;
78845 + struct snd_seq_dev_ops *ops;
78846
78847 /* registred devices */
78848 struct list_head dev_list; /* list of devices */
78849 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
78850
78851 mutex_lock(&ops->reg_mutex);
78852 /* copy driver operators */
78853 - ops->ops = *entry;
78854 + ops->ops = entry;
78855 ops->driver |= DRIVER_LOADED;
78856 ops->argsize = argsize;
78857
78858 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
78859 dev->name, ops->id, ops->argsize, dev->argsize);
78860 return -EINVAL;
78861 }
78862 - if (ops->ops.init_device(dev) >= 0) {
78863 + if (ops->ops->init_device(dev) >= 0) {
78864 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
78865 ops->num_init_devices++;
78866 } else {
78867 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
78868 dev->name, ops->id, ops->argsize, dev->argsize);
78869 return -EINVAL;
78870 }
78871 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
78872 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
78873 dev->status = SNDRV_SEQ_DEVICE_FREE;
78874 dev->driver_data = NULL;
78875 ops->num_init_devices--;
78876 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
78877 index f24bf9a..1f7b67c 100644
78878 --- a/sound/drivers/mts64.c
78879 +++ b/sound/drivers/mts64.c
78880 @@ -29,6 +29,7 @@
78881 #include <sound/initval.h>
78882 #include <sound/rawmidi.h>
78883 #include <sound/control.h>
78884 +#include <asm/local.h>
78885
78886 #define CARD_NAME "Miditerminal 4140"
78887 #define DRIVER_NAME "MTS64"
78888 @@ -67,7 +68,7 @@ struct mts64 {
78889 struct pardevice *pardev;
78890 int pardev_claimed;
78891
78892 - int open_count;
78893 + local_t open_count;
78894 int current_midi_output_port;
78895 int current_midi_input_port;
78896 u8 mode[MTS64_NUM_INPUT_PORTS];
78897 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78898 {
78899 struct mts64 *mts = substream->rmidi->private_data;
78900
78901 - if (mts->open_count == 0) {
78902 + if (local_read(&mts->open_count) == 0) {
78903 /* We don't need a spinlock here, because this is just called
78904 if the device has not been opened before.
78905 So there aren't any IRQs from the device */
78906 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
78907
78908 msleep(50);
78909 }
78910 - ++(mts->open_count);
78911 + local_inc(&mts->open_count);
78912
78913 return 0;
78914 }
78915 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78916 struct mts64 *mts = substream->rmidi->private_data;
78917 unsigned long flags;
78918
78919 - --(mts->open_count);
78920 - if (mts->open_count == 0) {
78921 + if (local_dec_return(&mts->open_count) == 0) {
78922 /* We need the spinlock_irqsave here because we can still
78923 have IRQs at this point */
78924 spin_lock_irqsave(&mts->lock, flags);
78925 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
78926
78927 msleep(500);
78928
78929 - } else if (mts->open_count < 0)
78930 - mts->open_count = 0;
78931 + } else if (local_read(&mts->open_count) < 0)
78932 + local_set(&mts->open_count, 0);
78933
78934 return 0;
78935 }
78936 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
78937 index b953fb4..1999c01 100644
78938 --- a/sound/drivers/opl4/opl4_lib.c
78939 +++ b/sound/drivers/opl4/opl4_lib.c
78940 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
78941 MODULE_DESCRIPTION("OPL4 driver");
78942 MODULE_LICENSE("GPL");
78943
78944 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
78945 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
78946 {
78947 int timeout = 10;
78948 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
78949 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
78950 index f664823..590c745 100644
78951 --- a/sound/drivers/portman2x4.c
78952 +++ b/sound/drivers/portman2x4.c
78953 @@ -48,6 +48,7 @@
78954 #include <sound/initval.h>
78955 #include <sound/rawmidi.h>
78956 #include <sound/control.h>
78957 +#include <asm/local.h>
78958
78959 #define CARD_NAME "Portman 2x4"
78960 #define DRIVER_NAME "portman"
78961 @@ -85,7 +86,7 @@ struct portman {
78962 struct pardevice *pardev;
78963 int pardev_claimed;
78964
78965 - int open_count;
78966 + local_t open_count;
78967 int mode[PORTMAN_NUM_INPUT_PORTS];
78968 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
78969 };
78970 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
78971 index 87657dd..a8268d4 100644
78972 --- a/sound/firewire/amdtp.c
78973 +++ b/sound/firewire/amdtp.c
78974 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
78975 ptr = s->pcm_buffer_pointer + data_blocks;
78976 if (ptr >= pcm->runtime->buffer_size)
78977 ptr -= pcm->runtime->buffer_size;
78978 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
78979 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
78980
78981 s->pcm_period_pointer += data_blocks;
78982 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
78983 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
78984 */
78985 void amdtp_out_stream_update(struct amdtp_out_stream *s)
78986 {
78987 - ACCESS_ONCE(s->source_node_id_field) =
78988 + ACCESS_ONCE_RW(s->source_node_id_field) =
78989 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
78990 }
78991 EXPORT_SYMBOL(amdtp_out_stream_update);
78992 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
78993 index 537a9cb..8e8c8e9 100644
78994 --- a/sound/firewire/amdtp.h
78995 +++ b/sound/firewire/amdtp.h
78996 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
78997 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
78998 struct snd_pcm_substream *pcm)
78999 {
79000 - ACCESS_ONCE(s->pcm) = pcm;
79001 + ACCESS_ONCE_RW(s->pcm) = pcm;
79002 }
79003
79004 /**
79005 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
79006 index cd094ec..eca1277 100644
79007 --- a/sound/firewire/isight.c
79008 +++ b/sound/firewire/isight.c
79009 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
79010 ptr += count;
79011 if (ptr >= runtime->buffer_size)
79012 ptr -= runtime->buffer_size;
79013 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
79014 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
79015
79016 isight->period_counter += count;
79017 if (isight->period_counter >= runtime->period_size) {
79018 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
79019 if (err < 0)
79020 return err;
79021
79022 - ACCESS_ONCE(isight->pcm_active) = true;
79023 + ACCESS_ONCE_RW(isight->pcm_active) = true;
79024
79025 return 0;
79026 }
79027 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
79028 {
79029 struct isight *isight = substream->private_data;
79030
79031 - ACCESS_ONCE(isight->pcm_active) = false;
79032 + ACCESS_ONCE_RW(isight->pcm_active) = false;
79033
79034 mutex_lock(&isight->mutex);
79035 isight_stop_streaming(isight);
79036 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
79037
79038 switch (cmd) {
79039 case SNDRV_PCM_TRIGGER_START:
79040 - ACCESS_ONCE(isight->pcm_running) = true;
79041 + ACCESS_ONCE_RW(isight->pcm_running) = true;
79042 break;
79043 case SNDRV_PCM_TRIGGER_STOP:
79044 - ACCESS_ONCE(isight->pcm_running) = false;
79045 + ACCESS_ONCE_RW(isight->pcm_running) = false;
79046 break;
79047 default:
79048 return -EINVAL;
79049 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
79050 index c94578d..0794ac1 100644
79051 --- a/sound/isa/cmi8330.c
79052 +++ b/sound/isa/cmi8330.c
79053 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
79054
79055 struct snd_pcm *pcm;
79056 struct snd_cmi8330_stream {
79057 - struct snd_pcm_ops ops;
79058 + snd_pcm_ops_no_const ops;
79059 snd_pcm_open_callback_t open;
79060 void *private_data; /* sb or wss */
79061 } streams[2];
79062 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
79063 index 733b014..56ce96f 100644
79064 --- a/sound/oss/sb_audio.c
79065 +++ b/sound/oss/sb_audio.c
79066 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
79067 buf16 = (signed short *)(localbuf + localoffs);
79068 while (c)
79069 {
79070 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79071 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79072 if (copy_from_user(lbuf8,
79073 userbuf+useroffs + p,
79074 locallen))
79075 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
79076 index 09d4648..cf234c7 100644
79077 --- a/sound/oss/swarm_cs4297a.c
79078 +++ b/sound/oss/swarm_cs4297a.c
79079 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
79080 {
79081 struct cs4297a_state *s;
79082 u32 pwr, id;
79083 - mm_segment_t fs;
79084 int rval;
79085 #ifndef CONFIG_BCM_CS4297A_CSWARM
79086 u64 cfg;
79087 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
79088 if (!rval) {
79089 char *sb1250_duart_present;
79090
79091 +#if 0
79092 + mm_segment_t fs;
79093 fs = get_fs();
79094 set_fs(KERNEL_DS);
79095 -#if 0
79096 val = SOUND_MASK_LINE;
79097 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
79098 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
79099 val = initvol[i].vol;
79100 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
79101 }
79102 + set_fs(fs);
79103 // cs4297a_write_ac97(s, 0x18, 0x0808);
79104 #else
79105 // cs4297a_write_ac97(s, 0x5e, 0x180);
79106 cs4297a_write_ac97(s, 0x02, 0x0808);
79107 cs4297a_write_ac97(s, 0x18, 0x0808);
79108 #endif
79109 - set_fs(fs);
79110
79111 list_add(&s->list, &cs4297a_devs);
79112
79113 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
79114 index 71f6744..d8aeae7 100644
79115 --- a/sound/pci/hda/hda_codec.h
79116 +++ b/sound/pci/hda/hda_codec.h
79117 @@ -614,7 +614,7 @@ struct hda_bus_ops {
79118 /* notify power-up/down from codec to controller */
79119 void (*pm_notify)(struct hda_bus *bus);
79120 #endif
79121 -};
79122 +} __no_const;
79123
79124 /* template to pass to the bus constructor */
79125 struct hda_bus_template {
79126 @@ -716,6 +716,7 @@ struct hda_codec_ops {
79127 #endif
79128 void (*reboot_notify)(struct hda_codec *codec);
79129 };
79130 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
79131
79132 /* record for amp information cache */
79133 struct hda_cache_head {
79134 @@ -746,7 +747,7 @@ struct hda_pcm_ops {
79135 struct snd_pcm_substream *substream);
79136 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
79137 struct snd_pcm_substream *substream);
79138 -};
79139 +} __no_const;
79140
79141 /* PCM information for each substream */
79142 struct hda_pcm_stream {
79143 @@ -804,7 +805,7 @@ struct hda_codec {
79144 const char *modelname; /* model name for preset */
79145
79146 /* set by patch */
79147 - struct hda_codec_ops patch_ops;
79148 + hda_codec_ops_no_const patch_ops;
79149
79150 /* PCM to create, set by patch_ops.build_pcms callback */
79151 unsigned int num_pcms;
79152 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
79153 index 0da778a..bc38b84 100644
79154 --- a/sound/pci/ice1712/ice1712.h
79155 +++ b/sound/pci/ice1712/ice1712.h
79156 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
79157 unsigned int mask_flags; /* total mask bits */
79158 struct snd_akm4xxx_ops {
79159 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
79160 - } ops;
79161 + } __no_const ops;
79162 };
79163
79164 struct snd_ice1712_spdif {
79165 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
79166 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79167 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79168 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79169 - } ops;
79170 + } __no_const ops;
79171 };
79172
79173
79174 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
79175 index 03ee4e3..be86b46 100644
79176 --- a/sound/pci/ymfpci/ymfpci_main.c
79177 +++ b/sound/pci/ymfpci/ymfpci_main.c
79178 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
79179 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
79180 break;
79181 }
79182 - if (atomic_read(&chip->interrupt_sleep_count)) {
79183 - atomic_set(&chip->interrupt_sleep_count, 0);
79184 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79185 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79186 wake_up(&chip->interrupt_sleep);
79187 }
79188 __end:
79189 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
79190 continue;
79191 init_waitqueue_entry(&wait, current);
79192 add_wait_queue(&chip->interrupt_sleep, &wait);
79193 - atomic_inc(&chip->interrupt_sleep_count);
79194 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
79195 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
79196 remove_wait_queue(&chip->interrupt_sleep, &wait);
79197 }
79198 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
79199 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
79200 spin_unlock(&chip->reg_lock);
79201
79202 - if (atomic_read(&chip->interrupt_sleep_count)) {
79203 - atomic_set(&chip->interrupt_sleep_count, 0);
79204 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79205 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79206 wake_up(&chip->interrupt_sleep);
79207 }
79208 }
79209 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
79210 spin_lock_init(&chip->reg_lock);
79211 spin_lock_init(&chip->voice_lock);
79212 init_waitqueue_head(&chip->interrupt_sleep);
79213 - atomic_set(&chip->interrupt_sleep_count, 0);
79214 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79215 chip->card = card;
79216 chip->pci = pci;
79217 chip->irq = -1;
79218 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
79219 index ee15337..e2187a6 100644
79220 --- a/sound/soc/soc-pcm.c
79221 +++ b/sound/soc/soc-pcm.c
79222 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
79223 }
79224
79225 /* ASoC PCM operations */
79226 -static struct snd_pcm_ops soc_pcm_ops = {
79227 +static snd_pcm_ops_no_const soc_pcm_ops = {
79228 .open = soc_pcm_open,
79229 .close = soc_pcm_close,
79230 .hw_params = soc_pcm_hw_params,
79231 diff --git a/sound/usb/card.h b/sound/usb/card.h
79232 index a39edcc..1014050 100644
79233 --- a/sound/usb/card.h
79234 +++ b/sound/usb/card.h
79235 @@ -44,6 +44,7 @@ struct snd_urb_ops {
79236 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79237 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79238 };
79239 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
79240
79241 struct snd_usb_substream {
79242 struct snd_usb_stream *stream;
79243 @@ -93,7 +94,7 @@ struct snd_usb_substream {
79244 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
79245 spinlock_t lock;
79246
79247 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
79248 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
79249 int last_frame_number; /* stored frame number */
79250 int last_delay; /* stored delay */
79251 };
79252 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
79253 new file mode 100644
79254 index 0000000..894c8bf
79255 --- /dev/null
79256 +++ b/tools/gcc/Makefile
79257 @@ -0,0 +1,23 @@
79258 +#CC := gcc
79259 +#PLUGIN_SOURCE_FILES := pax_plugin.c
79260 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
79261 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
79262 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
79263 +
79264 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
79265 +
79266 +hostlibs-y := constify_plugin.so
79267 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
79268 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
79269 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
79270 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
79271 +hostlibs-y += colorize_plugin.so
79272 +
79273 +always := $(hostlibs-y)
79274 +
79275 +constify_plugin-objs := constify_plugin.o
79276 +stackleak_plugin-objs := stackleak_plugin.o
79277 +kallocstat_plugin-objs := kallocstat_plugin.o
79278 +kernexec_plugin-objs := kernexec_plugin.o
79279 +checker_plugin-objs := checker_plugin.o
79280 +colorize_plugin-objs := colorize_plugin.o
79281 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
79282 new file mode 100644
79283 index 0000000..d41b5af
79284 --- /dev/null
79285 +++ b/tools/gcc/checker_plugin.c
79286 @@ -0,0 +1,171 @@
79287 +/*
79288 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79289 + * Licensed under the GPL v2
79290 + *
79291 + * Note: the choice of the license means that the compilation process is
79292 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79293 + * but for the kernel it doesn't matter since it doesn't link against
79294 + * any of the gcc libraries
79295 + *
79296 + * gcc plugin to implement various sparse (source code checker) features
79297 + *
79298 + * TODO:
79299 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
79300 + *
79301 + * BUGS:
79302 + * - none known
79303 + */
79304 +#include "gcc-plugin.h"
79305 +#include "config.h"
79306 +#include "system.h"
79307 +#include "coretypes.h"
79308 +#include "tree.h"
79309 +#include "tree-pass.h"
79310 +#include "flags.h"
79311 +#include "intl.h"
79312 +#include "toplev.h"
79313 +#include "plugin.h"
79314 +//#include "expr.h" where are you...
79315 +#include "diagnostic.h"
79316 +#include "plugin-version.h"
79317 +#include "tm.h"
79318 +#include "function.h"
79319 +#include "basic-block.h"
79320 +#include "gimple.h"
79321 +#include "rtl.h"
79322 +#include "emit-rtl.h"
79323 +#include "tree-flow.h"
79324 +#include "target.h"
79325 +
79326 +extern void c_register_addr_space (const char *str, addr_space_t as);
79327 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
79328 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
79329 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
79330 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
79331 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
79332 +
79333 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79334 +extern rtx emit_move_insn(rtx x, rtx y);
79335 +
79336 +int plugin_is_GPL_compatible;
79337 +
79338 +static struct plugin_info checker_plugin_info = {
79339 + .version = "201111150100",
79340 +};
79341 +
79342 +#define ADDR_SPACE_KERNEL 0
79343 +#define ADDR_SPACE_FORCE_KERNEL 1
79344 +#define ADDR_SPACE_USER 2
79345 +#define ADDR_SPACE_FORCE_USER 3
79346 +#define ADDR_SPACE_IOMEM 0
79347 +#define ADDR_SPACE_FORCE_IOMEM 0
79348 +#define ADDR_SPACE_PERCPU 0
79349 +#define ADDR_SPACE_FORCE_PERCPU 0
79350 +#define ADDR_SPACE_RCU 0
79351 +#define ADDR_SPACE_FORCE_RCU 0
79352 +
79353 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
79354 +{
79355 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
79356 +}
79357 +
79358 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
79359 +{
79360 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
79361 +}
79362 +
79363 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
79364 +{
79365 + return default_addr_space_valid_pointer_mode(mode, as);
79366 +}
79367 +
79368 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
79369 +{
79370 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
79371 +}
79372 +
79373 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
79374 +{
79375 + return default_addr_space_legitimize_address(x, oldx, mode, as);
79376 +}
79377 +
79378 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
79379 +{
79380 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
79381 + return true;
79382 +
79383 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
79384 + return true;
79385 +
79386 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
79387 + return true;
79388 +
79389 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
79390 + return true;
79391 +
79392 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
79393 + return true;
79394 +
79395 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
79396 + return true;
79397 +
79398 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
79399 + return true;
79400 +
79401 + return subset == superset;
79402 +}
79403 +
79404 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
79405 +{
79406 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
79407 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
79408 +
79409 + return op;
79410 +}
79411 +
79412 +static void register_checker_address_spaces(void *event_data, void *data)
79413 +{
79414 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
79415 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
79416 + c_register_addr_space("__user", ADDR_SPACE_USER);
79417 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
79418 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
79419 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
79420 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
79421 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
79422 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
79423 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
79424 +
79425 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
79426 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
79427 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
79428 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
79429 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
79430 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
79431 + targetm.addr_space.convert = checker_addr_space_convert;
79432 +}
79433 +
79434 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79435 +{
79436 + const char * const plugin_name = plugin_info->base_name;
79437 + const int argc = plugin_info->argc;
79438 + const struct plugin_argument * const argv = plugin_info->argv;
79439 + int i;
79440 +
79441 + if (!plugin_default_version_check(version, &gcc_version)) {
79442 + error(G_("incompatible gcc/plugin versions"));
79443 + return 1;
79444 + }
79445 +
79446 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
79447 +
79448 + for (i = 0; i < argc; ++i)
79449 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79450 +
79451 + if (TARGET_64BIT == 0)
79452 + return 0;
79453 +
79454 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
79455 +
79456 + return 0;
79457 +}
79458 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
79459 new file mode 100644
79460 index 0000000..ee950d0
79461 --- /dev/null
79462 +++ b/tools/gcc/colorize_plugin.c
79463 @@ -0,0 +1,147 @@
79464 +/*
79465 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
79466 + * Licensed under the GPL v2
79467 + *
79468 + * Note: the choice of the license means that the compilation process is
79469 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79470 + * but for the kernel it doesn't matter since it doesn't link against
79471 + * any of the gcc libraries
79472 + *
79473 + * gcc plugin to colorize diagnostic output
79474 + *
79475 + */
79476 +
79477 +#include "gcc-plugin.h"
79478 +#include "config.h"
79479 +#include "system.h"
79480 +#include "coretypes.h"
79481 +#include "tree.h"
79482 +#include "tree-pass.h"
79483 +#include "flags.h"
79484 +#include "intl.h"
79485 +#include "toplev.h"
79486 +#include "plugin.h"
79487 +#include "diagnostic.h"
79488 +#include "plugin-version.h"
79489 +#include "tm.h"
79490 +
79491 +int plugin_is_GPL_compatible;
79492 +
79493 +static struct plugin_info colorize_plugin_info = {
79494 + .version = "201203092200",
79495 +};
79496 +
79497 +#define GREEN "\033[32m\033[2m"
79498 +#define LIGHTGREEN "\033[32m\033[1m"
79499 +#define YELLOW "\033[33m\033[2m"
79500 +#define LIGHTYELLOW "\033[33m\033[1m"
79501 +#define RED "\033[31m\033[2m"
79502 +#define LIGHTRED "\033[31m\033[1m"
79503 +#define BLUE "\033[34m\033[2m"
79504 +#define LIGHTBLUE "\033[34m\033[1m"
79505 +#define BRIGHT "\033[m\033[1m"
79506 +#define NORMAL "\033[m"
79507 +
79508 +static diagnostic_starter_fn old_starter;
79509 +static diagnostic_finalizer_fn old_finalizer;
79510 +
79511 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79512 +{
79513 + const char *color;
79514 + char *newprefix;
79515 +
79516 + switch (diagnostic->kind) {
79517 + case DK_NOTE:
79518 + color = LIGHTBLUE;
79519 + break;
79520 +
79521 + case DK_PEDWARN:
79522 + case DK_WARNING:
79523 + color = LIGHTYELLOW;
79524 + break;
79525 +
79526 + case DK_ERROR:
79527 + case DK_FATAL:
79528 + case DK_ICE:
79529 + case DK_PERMERROR:
79530 + case DK_SORRY:
79531 + color = LIGHTRED;
79532 + break;
79533 +
79534 + default:
79535 + color = NORMAL;
79536 + }
79537 +
79538 + old_starter(context, diagnostic);
79539 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
79540 + return;
79541 + pp_destroy_prefix(context->printer);
79542 + pp_set_prefix(context->printer, newprefix);
79543 +}
79544 +
79545 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79546 +{
79547 + old_finalizer(context, diagnostic);
79548 +}
79549 +
79550 +static void colorize_arm(void)
79551 +{
79552 + old_starter = diagnostic_starter(global_dc);
79553 + old_finalizer = diagnostic_finalizer(global_dc);
79554 +
79555 + diagnostic_starter(global_dc) = start_colorize;
79556 + diagnostic_finalizer(global_dc) = finalize_colorize;
79557 +}
79558 +
79559 +static unsigned int execute_colorize_rearm(void)
79560 +{
79561 + if (diagnostic_starter(global_dc) == start_colorize)
79562 + return 0;
79563 +
79564 + colorize_arm();
79565 + return 0;
79566 +}
79567 +
79568 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
79569 + .pass = {
79570 + .type = SIMPLE_IPA_PASS,
79571 + .name = "colorize_rearm",
79572 + .gate = NULL,
79573 + .execute = execute_colorize_rearm,
79574 + .sub = NULL,
79575 + .next = NULL,
79576 + .static_pass_number = 0,
79577 + .tv_id = TV_NONE,
79578 + .properties_required = 0,
79579 + .properties_provided = 0,
79580 + .properties_destroyed = 0,
79581 + .todo_flags_start = 0,
79582 + .todo_flags_finish = 0
79583 + }
79584 +};
79585 +
79586 +static void colorize_start_unit(void *gcc_data, void *user_data)
79587 +{
79588 + colorize_arm();
79589 +}
79590 +
79591 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79592 +{
79593 + const char * const plugin_name = plugin_info->base_name;
79594 + struct register_pass_info colorize_rearm_pass_info = {
79595 + .pass = &pass_ipa_colorize_rearm.pass,
79596 + .reference_pass_name = "*free_lang_data",
79597 + .ref_pass_instance_number = 0,
79598 + .pos_op = PASS_POS_INSERT_AFTER
79599 + };
79600 +
79601 + if (!plugin_default_version_check(version, &gcc_version)) {
79602 + error(G_("incompatible gcc/plugin versions"));
79603 + return 1;
79604 + }
79605 +
79606 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
79607 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
79608 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
79609 + return 0;
79610 +}
79611 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
79612 new file mode 100644
79613 index 0000000..704a564
79614 --- /dev/null
79615 +++ b/tools/gcc/constify_plugin.c
79616 @@ -0,0 +1,303 @@
79617 +/*
79618 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
79619 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
79620 + * Licensed under the GPL v2, or (at your option) v3
79621 + *
79622 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
79623 + *
79624 + * Homepage:
79625 + * http://www.grsecurity.net/~ephox/const_plugin/
79626 + *
79627 + * Usage:
79628 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
79629 + * $ gcc -fplugin=constify_plugin.so test.c -O2
79630 + */
79631 +
79632 +#include "gcc-plugin.h"
79633 +#include "config.h"
79634 +#include "system.h"
79635 +#include "coretypes.h"
79636 +#include "tree.h"
79637 +#include "tree-pass.h"
79638 +#include "flags.h"
79639 +#include "intl.h"
79640 +#include "toplev.h"
79641 +#include "plugin.h"
79642 +#include "diagnostic.h"
79643 +#include "plugin-version.h"
79644 +#include "tm.h"
79645 +#include "function.h"
79646 +#include "basic-block.h"
79647 +#include "gimple.h"
79648 +#include "rtl.h"
79649 +#include "emit-rtl.h"
79650 +#include "tree-flow.h"
79651 +
79652 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
79653 +
79654 +int plugin_is_GPL_compatible;
79655 +
79656 +static struct plugin_info const_plugin_info = {
79657 + .version = "201111150100",
79658 + .help = "no-constify\tturn off constification\n",
79659 +};
79660 +
79661 +static void constify_type(tree type);
79662 +static bool walk_struct(tree node);
79663 +
79664 +static tree deconstify_type(tree old_type)
79665 +{
79666 + tree new_type, field;
79667 +
79668 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
79669 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
79670 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
79671 + DECL_FIELD_CONTEXT(field) = new_type;
79672 + TYPE_READONLY(new_type) = 0;
79673 + C_TYPE_FIELDS_READONLY(new_type) = 0;
79674 + return new_type;
79675 +}
79676 +
79677 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79678 +{
79679 + tree type;
79680 +
79681 + *no_add_attrs = true;
79682 + if (TREE_CODE(*node) == FUNCTION_DECL) {
79683 + error("%qE attribute does not apply to functions", name);
79684 + return NULL_TREE;
79685 + }
79686 +
79687 + if (TREE_CODE(*node) == VAR_DECL) {
79688 + error("%qE attribute does not apply to variables", name);
79689 + return NULL_TREE;
79690 + }
79691 +
79692 + if (TYPE_P(*node)) {
79693 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
79694 + *no_add_attrs = false;
79695 + else
79696 + error("%qE attribute applies to struct and union types only", name);
79697 + return NULL_TREE;
79698 + }
79699 +
79700 + type = TREE_TYPE(*node);
79701 +
79702 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
79703 + error("%qE attribute applies to struct and union types only", name);
79704 + return NULL_TREE;
79705 + }
79706 +
79707 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
79708 + error("%qE attribute is already applied to the type", name);
79709 + return NULL_TREE;
79710 + }
79711 +
79712 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
79713 + error("%qE attribute used on type that is not constified", name);
79714 + return NULL_TREE;
79715 + }
79716 +
79717 + if (TREE_CODE(*node) == TYPE_DECL) {
79718 + TREE_TYPE(*node) = deconstify_type(type);
79719 + TREE_READONLY(*node) = 0;
79720 + return NULL_TREE;
79721 + }
79722 +
79723 + return NULL_TREE;
79724 +}
79725 +
79726 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79727 +{
79728 + *no_add_attrs = true;
79729 + if (!TYPE_P(*node)) {
79730 + error("%qE attribute applies to types only", name);
79731 + return NULL_TREE;
79732 + }
79733 +
79734 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
79735 + error("%qE attribute applies to struct and union types only", name);
79736 + return NULL_TREE;
79737 + }
79738 +
79739 + *no_add_attrs = false;
79740 + constify_type(*node);
79741 + return NULL_TREE;
79742 +}
79743 +
79744 +static struct attribute_spec no_const_attr = {
79745 + .name = "no_const",
79746 + .min_length = 0,
79747 + .max_length = 0,
79748 + .decl_required = false,
79749 + .type_required = false,
79750 + .function_type_required = false,
79751 + .handler = handle_no_const_attribute,
79752 +#if BUILDING_GCC_VERSION >= 4007
79753 + .affects_type_identity = true
79754 +#endif
79755 +};
79756 +
79757 +static struct attribute_spec do_const_attr = {
79758 + .name = "do_const",
79759 + .min_length = 0,
79760 + .max_length = 0,
79761 + .decl_required = false,
79762 + .type_required = false,
79763 + .function_type_required = false,
79764 + .handler = handle_do_const_attribute,
79765 +#if BUILDING_GCC_VERSION >= 4007
79766 + .affects_type_identity = true
79767 +#endif
79768 +};
79769 +
79770 +static void register_attributes(void *event_data, void *data)
79771 +{
79772 + register_attribute(&no_const_attr);
79773 + register_attribute(&do_const_attr);
79774 +}
79775 +
79776 +static void constify_type(tree type)
79777 +{
79778 + TYPE_READONLY(type) = 1;
79779 + C_TYPE_FIELDS_READONLY(type) = 1;
79780 +}
79781 +
79782 +static bool is_fptr(tree field)
79783 +{
79784 + tree ptr = TREE_TYPE(field);
79785 +
79786 + if (TREE_CODE(ptr) != POINTER_TYPE)
79787 + return false;
79788 +
79789 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
79790 +}
79791 +
79792 +static bool walk_struct(tree node)
79793 +{
79794 + tree field;
79795 +
79796 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
79797 + return false;
79798 +
79799 + if (TYPE_FIELDS(node) == NULL_TREE)
79800 + return false;
79801 +
79802 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
79803 + tree type = TREE_TYPE(field);
79804 + enum tree_code code = TREE_CODE(type);
79805 + if (code == RECORD_TYPE || code == UNION_TYPE) {
79806 + if (!(walk_struct(type)))
79807 + return false;
79808 + } else if (!is_fptr(field) && !TREE_READONLY(field))
79809 + return false;
79810 + }
79811 + return true;
79812 +}
79813 +
79814 +static void finish_type(void *event_data, void *data)
79815 +{
79816 + tree type = (tree)event_data;
79817 +
79818 + if (type == NULL_TREE)
79819 + return;
79820 +
79821 + if (TYPE_READONLY(type))
79822 + return;
79823 +
79824 + if (walk_struct(type))
79825 + constify_type(type);
79826 +}
79827 +
79828 +static unsigned int check_local_variables(void);
79829 +
79830 +struct gimple_opt_pass pass_local_variable = {
79831 + {
79832 + .type = GIMPLE_PASS,
79833 + .name = "check_local_variables",
79834 + .gate = NULL,
79835 + .execute = check_local_variables,
79836 + .sub = NULL,
79837 + .next = NULL,
79838 + .static_pass_number = 0,
79839 + .tv_id = TV_NONE,
79840 + .properties_required = 0,
79841 + .properties_provided = 0,
79842 + .properties_destroyed = 0,
79843 + .todo_flags_start = 0,
79844 + .todo_flags_finish = 0
79845 + }
79846 +};
79847 +
79848 +static unsigned int check_local_variables(void)
79849 +{
79850 + tree var;
79851 + referenced_var_iterator rvi;
79852 +
79853 +#if BUILDING_GCC_VERSION == 4005
79854 + FOR_EACH_REFERENCED_VAR(var, rvi) {
79855 +#else
79856 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
79857 +#endif
79858 + tree type = TREE_TYPE(var);
79859 +
79860 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
79861 + continue;
79862 +
79863 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79864 + continue;
79865 +
79866 + if (!TYPE_READONLY(type))
79867 + continue;
79868 +
79869 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
79870 +// continue;
79871 +
79872 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
79873 +// continue;
79874 +
79875 + if (walk_struct(type)) {
79876 + error("constified variable %qE cannot be local", var);
79877 + return 1;
79878 + }
79879 + }
79880 + return 0;
79881 +}
79882 +
79883 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79884 +{
79885 + const char * const plugin_name = plugin_info->base_name;
79886 + const int argc = plugin_info->argc;
79887 + const struct plugin_argument * const argv = plugin_info->argv;
79888 + int i;
79889 + bool constify = true;
79890 +
79891 + struct register_pass_info local_variable_pass_info = {
79892 + .pass = &pass_local_variable.pass,
79893 + .reference_pass_name = "*referenced_vars",
79894 + .ref_pass_instance_number = 0,
79895 + .pos_op = PASS_POS_INSERT_AFTER
79896 + };
79897 +
79898 + if (!plugin_default_version_check(version, &gcc_version)) {
79899 + error(G_("incompatible gcc/plugin versions"));
79900 + return 1;
79901 + }
79902 +
79903 + for (i = 0; i < argc; ++i) {
79904 + if (!(strcmp(argv[i].key, "no-constify"))) {
79905 + constify = false;
79906 + continue;
79907 + }
79908 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79909 + }
79910 +
79911 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
79912 + if (constify) {
79913 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
79914 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
79915 + }
79916 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
79917 +
79918 + return 0;
79919 +}
79920 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
79921 new file mode 100644
79922 index 0000000..a5eabce
79923 --- /dev/null
79924 +++ b/tools/gcc/kallocstat_plugin.c
79925 @@ -0,0 +1,167 @@
79926 +/*
79927 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79928 + * Licensed under the GPL v2
79929 + *
79930 + * Note: the choice of the license means that the compilation process is
79931 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79932 + * but for the kernel it doesn't matter since it doesn't link against
79933 + * any of the gcc libraries
79934 + *
79935 + * gcc plugin to find the distribution of k*alloc sizes
79936 + *
79937 + * TODO:
79938 + *
79939 + * BUGS:
79940 + * - none known
79941 + */
79942 +#include "gcc-plugin.h"
79943 +#include "config.h"
79944 +#include "system.h"
79945 +#include "coretypes.h"
79946 +#include "tree.h"
79947 +#include "tree-pass.h"
79948 +#include "flags.h"
79949 +#include "intl.h"
79950 +#include "toplev.h"
79951 +#include "plugin.h"
79952 +//#include "expr.h" where are you...
79953 +#include "diagnostic.h"
79954 +#include "plugin-version.h"
79955 +#include "tm.h"
79956 +#include "function.h"
79957 +#include "basic-block.h"
79958 +#include "gimple.h"
79959 +#include "rtl.h"
79960 +#include "emit-rtl.h"
79961 +
79962 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79963 +
79964 +int plugin_is_GPL_compatible;
79965 +
79966 +static const char * const kalloc_functions[] = {
79967 + "__kmalloc",
79968 + "kmalloc",
79969 + "kmalloc_large",
79970 + "kmalloc_node",
79971 + "kmalloc_order",
79972 + "kmalloc_order_trace",
79973 + "kmalloc_slab",
79974 + "kzalloc",
79975 + "kzalloc_node",
79976 +};
79977 +
79978 +static struct plugin_info kallocstat_plugin_info = {
79979 + .version = "201111150100",
79980 +};
79981 +
79982 +static unsigned int execute_kallocstat(void);
79983 +
79984 +static struct gimple_opt_pass kallocstat_pass = {
79985 + .pass = {
79986 + .type = GIMPLE_PASS,
79987 + .name = "kallocstat",
79988 + .gate = NULL,
79989 + .execute = execute_kallocstat,
79990 + .sub = NULL,
79991 + .next = NULL,
79992 + .static_pass_number = 0,
79993 + .tv_id = TV_NONE,
79994 + .properties_required = 0,
79995 + .properties_provided = 0,
79996 + .properties_destroyed = 0,
79997 + .todo_flags_start = 0,
79998 + .todo_flags_finish = 0
79999 + }
80000 +};
80001 +
80002 +static bool is_kalloc(const char *fnname)
80003 +{
80004 + size_t i;
80005 +
80006 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
80007 + if (!strcmp(fnname, kalloc_functions[i]))
80008 + return true;
80009 + return false;
80010 +}
80011 +
80012 +static unsigned int execute_kallocstat(void)
80013 +{
80014 + basic_block bb;
80015 +
80016 + // 1. loop through BBs and GIMPLE statements
80017 + FOR_EACH_BB(bb) {
80018 + gimple_stmt_iterator gsi;
80019 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80020 + // gimple match:
80021 + tree fndecl, size;
80022 + gimple call_stmt;
80023 + const char *fnname;
80024 +
80025 + // is it a call
80026 + call_stmt = gsi_stmt(gsi);
80027 + if (!is_gimple_call(call_stmt))
80028 + continue;
80029 + fndecl = gimple_call_fndecl(call_stmt);
80030 + if (fndecl == NULL_TREE)
80031 + continue;
80032 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
80033 + continue;
80034 +
80035 + // is it a call to k*alloc
80036 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
80037 + if (!is_kalloc(fnname))
80038 + continue;
80039 +
80040 + // is the size arg the result of a simple const assignment
80041 + size = gimple_call_arg(call_stmt, 0);
80042 + while (true) {
80043 + gimple def_stmt;
80044 + expanded_location xloc;
80045 + size_t size_val;
80046 +
80047 + if (TREE_CODE(size) != SSA_NAME)
80048 + break;
80049 + def_stmt = SSA_NAME_DEF_STMT(size);
80050 + if (!def_stmt || !is_gimple_assign(def_stmt))
80051 + break;
80052 + if (gimple_num_ops(def_stmt) != 2)
80053 + break;
80054 + size = gimple_assign_rhs1(def_stmt);
80055 + if (!TREE_CONSTANT(size))
80056 + continue;
80057 + xloc = expand_location(gimple_location(def_stmt));
80058 + if (!xloc.file)
80059 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
80060 + size_val = TREE_INT_CST_LOW(size);
80061 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
80062 + break;
80063 + }
80064 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80065 +//debug_tree(gimple_call_fn(call_stmt));
80066 +//print_node(stderr, "pax", fndecl, 4);
80067 + }
80068 + }
80069 +
80070 + return 0;
80071 +}
80072 +
80073 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80074 +{
80075 + const char * const plugin_name = plugin_info->base_name;
80076 + struct register_pass_info kallocstat_pass_info = {
80077 + .pass = &kallocstat_pass.pass,
80078 + .reference_pass_name = "ssa",
80079 + .ref_pass_instance_number = 0,
80080 + .pos_op = PASS_POS_INSERT_AFTER
80081 + };
80082 +
80083 + if (!plugin_default_version_check(version, &gcc_version)) {
80084 + error(G_("incompatible gcc/plugin versions"));
80085 + return 1;
80086 + }
80087 +
80088 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
80089 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
80090 +
80091 + return 0;
80092 +}
80093 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
80094 new file mode 100644
80095 index 0000000..008f159
80096 --- /dev/null
80097 +++ b/tools/gcc/kernexec_plugin.c
80098 @@ -0,0 +1,427 @@
80099 +/*
80100 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80101 + * Licensed under the GPL v2
80102 + *
80103 + * Note: the choice of the license means that the compilation process is
80104 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80105 + * but for the kernel it doesn't matter since it doesn't link against
80106 + * any of the gcc libraries
80107 + *
80108 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
80109 + *
80110 + * TODO:
80111 + *
80112 + * BUGS:
80113 + * - none known
80114 + */
80115 +#include "gcc-plugin.h"
80116 +#include "config.h"
80117 +#include "system.h"
80118 +#include "coretypes.h"
80119 +#include "tree.h"
80120 +#include "tree-pass.h"
80121 +#include "flags.h"
80122 +#include "intl.h"
80123 +#include "toplev.h"
80124 +#include "plugin.h"
80125 +//#include "expr.h" where are you...
80126 +#include "diagnostic.h"
80127 +#include "plugin-version.h"
80128 +#include "tm.h"
80129 +#include "function.h"
80130 +#include "basic-block.h"
80131 +#include "gimple.h"
80132 +#include "rtl.h"
80133 +#include "emit-rtl.h"
80134 +#include "tree-flow.h"
80135 +
80136 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80137 +extern rtx emit_move_insn(rtx x, rtx y);
80138 +
80139 +int plugin_is_GPL_compatible;
80140 +
80141 +static struct plugin_info kernexec_plugin_info = {
80142 + .version = "201111291120",
80143 + .help = "method=[bts|or]\tinstrumentation method\n"
80144 +};
80145 +
80146 +static unsigned int execute_kernexec_reload(void);
80147 +static unsigned int execute_kernexec_fptr(void);
80148 +static unsigned int execute_kernexec_retaddr(void);
80149 +static bool kernexec_cmodel_check(void);
80150 +
80151 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
80152 +static void (*kernexec_instrument_retaddr)(rtx);
80153 +
80154 +static struct gimple_opt_pass kernexec_reload_pass = {
80155 + .pass = {
80156 + .type = GIMPLE_PASS,
80157 + .name = "kernexec_reload",
80158 + .gate = kernexec_cmodel_check,
80159 + .execute = execute_kernexec_reload,
80160 + .sub = NULL,
80161 + .next = NULL,
80162 + .static_pass_number = 0,
80163 + .tv_id = TV_NONE,
80164 + .properties_required = 0,
80165 + .properties_provided = 0,
80166 + .properties_destroyed = 0,
80167 + .todo_flags_start = 0,
80168 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80169 + }
80170 +};
80171 +
80172 +static struct gimple_opt_pass kernexec_fptr_pass = {
80173 + .pass = {
80174 + .type = GIMPLE_PASS,
80175 + .name = "kernexec_fptr",
80176 + .gate = kernexec_cmodel_check,
80177 + .execute = execute_kernexec_fptr,
80178 + .sub = NULL,
80179 + .next = NULL,
80180 + .static_pass_number = 0,
80181 + .tv_id = TV_NONE,
80182 + .properties_required = 0,
80183 + .properties_provided = 0,
80184 + .properties_destroyed = 0,
80185 + .todo_flags_start = 0,
80186 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80187 + }
80188 +};
80189 +
80190 +static struct rtl_opt_pass kernexec_retaddr_pass = {
80191 + .pass = {
80192 + .type = RTL_PASS,
80193 + .name = "kernexec_retaddr",
80194 + .gate = kernexec_cmodel_check,
80195 + .execute = execute_kernexec_retaddr,
80196 + .sub = NULL,
80197 + .next = NULL,
80198 + .static_pass_number = 0,
80199 + .tv_id = TV_NONE,
80200 + .properties_required = 0,
80201 + .properties_provided = 0,
80202 + .properties_destroyed = 0,
80203 + .todo_flags_start = 0,
80204 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
80205 + }
80206 +};
80207 +
80208 +static bool kernexec_cmodel_check(void)
80209 +{
80210 + tree section;
80211 +
80212 + if (ix86_cmodel != CM_KERNEL)
80213 + return false;
80214 +
80215 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
80216 + if (!section || !TREE_VALUE(section))
80217 + return true;
80218 +
80219 + section = TREE_VALUE(TREE_VALUE(section));
80220 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
80221 + return true;
80222 +
80223 + return false;
80224 +}
80225 +
80226 +/*
80227 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
80228 + */
80229 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
80230 +{
80231 + gimple asm_movabs_stmt;
80232 +
80233 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
80234 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
80235 + gimple_asm_set_volatile(asm_movabs_stmt, true);
80236 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
80237 + update_stmt(asm_movabs_stmt);
80238 +}
80239 +
80240 +/*
80241 + * find all asm() stmts that clobber r10 and add a reload of r10
80242 + */
80243 +static unsigned int execute_kernexec_reload(void)
80244 +{
80245 + basic_block bb;
80246 +
80247 + // 1. loop through BBs and GIMPLE statements
80248 + FOR_EACH_BB(bb) {
80249 + gimple_stmt_iterator gsi;
80250 +
80251 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80252 + // gimple match: __asm__ ("" : : : "r10");
80253 + gimple asm_stmt;
80254 + size_t nclobbers;
80255 +
80256 + // is it an asm ...
80257 + asm_stmt = gsi_stmt(gsi);
80258 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
80259 + continue;
80260 +
80261 + // ... clobbering r10
80262 + nclobbers = gimple_asm_nclobbers(asm_stmt);
80263 + while (nclobbers--) {
80264 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
80265 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
80266 + continue;
80267 + kernexec_reload_fptr_mask(&gsi);
80268 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
80269 + break;
80270 + }
80271 + }
80272 + }
80273 +
80274 + return 0;
80275 +}
80276 +
80277 +/*
80278 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
80279 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
80280 + */
80281 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
80282 +{
80283 + gimple assign_intptr, assign_new_fptr, call_stmt;
80284 + tree intptr, old_fptr, new_fptr, kernexec_mask;
80285 +
80286 + call_stmt = gsi_stmt(*gsi);
80287 + old_fptr = gimple_call_fn(call_stmt);
80288 +
80289 + // create temporary unsigned long variable used for bitops and cast fptr to it
80290 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
80291 + add_referenced_var(intptr);
80292 + mark_sym_for_renaming(intptr);
80293 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
80294 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80295 + update_stmt(assign_intptr);
80296 +
80297 + // apply logical or to temporary unsigned long and bitmask
80298 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
80299 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
80300 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
80301 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80302 + update_stmt(assign_intptr);
80303 +
80304 + // cast temporary unsigned long back to a temporary fptr variable
80305 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
80306 + add_referenced_var(new_fptr);
80307 + mark_sym_for_renaming(new_fptr);
80308 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
80309 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
80310 + update_stmt(assign_new_fptr);
80311 +
80312 + // replace call stmt fn with the new fptr
80313 + gimple_call_set_fn(call_stmt, new_fptr);
80314 + update_stmt(call_stmt);
80315 +}
80316 +
80317 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
80318 +{
80319 + gimple asm_or_stmt, call_stmt;
80320 + tree old_fptr, new_fptr, input, output;
80321 + VEC(tree, gc) *inputs = NULL;
80322 + VEC(tree, gc) *outputs = NULL;
80323 +
80324 + call_stmt = gsi_stmt(*gsi);
80325 + old_fptr = gimple_call_fn(call_stmt);
80326 +
80327 + // create temporary fptr variable
80328 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
80329 + add_referenced_var(new_fptr);
80330 + mark_sym_for_renaming(new_fptr);
80331 +
80332 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
80333 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
80334 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
80335 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
80336 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
80337 + VEC_safe_push(tree, gc, inputs, input);
80338 + VEC_safe_push(tree, gc, outputs, output);
80339 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
80340 + gimple_asm_set_volatile(asm_or_stmt, true);
80341 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
80342 + update_stmt(asm_or_stmt);
80343 +
80344 + // replace call stmt fn with the new fptr
80345 + gimple_call_set_fn(call_stmt, new_fptr);
80346 + update_stmt(call_stmt);
80347 +}
80348 +
80349 +/*
80350 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
80351 + */
80352 +static unsigned int execute_kernexec_fptr(void)
80353 +{
80354 + basic_block bb;
80355 +
80356 + // 1. loop through BBs and GIMPLE statements
80357 + FOR_EACH_BB(bb) {
80358 + gimple_stmt_iterator gsi;
80359 +
80360 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80361 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
80362 + tree fn;
80363 + gimple call_stmt;
80364 +
80365 + // is it a call ...
80366 + call_stmt = gsi_stmt(gsi);
80367 + if (!is_gimple_call(call_stmt))
80368 + continue;
80369 + fn = gimple_call_fn(call_stmt);
80370 + if (TREE_CODE(fn) == ADDR_EXPR)
80371 + continue;
80372 + if (TREE_CODE(fn) != SSA_NAME)
80373 + gcc_unreachable();
80374 +
80375 + // ... through a function pointer
80376 + fn = SSA_NAME_VAR(fn);
80377 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
80378 + continue;
80379 + fn = TREE_TYPE(fn);
80380 + if (TREE_CODE(fn) != POINTER_TYPE)
80381 + continue;
80382 + fn = TREE_TYPE(fn);
80383 + if (TREE_CODE(fn) != FUNCTION_TYPE)
80384 + continue;
80385 +
80386 + kernexec_instrument_fptr(&gsi);
80387 +
80388 +//debug_tree(gimple_call_fn(call_stmt));
80389 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80390 + }
80391 + }
80392 +
80393 + return 0;
80394 +}
80395 +
80396 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
80397 +static void kernexec_instrument_retaddr_bts(rtx insn)
80398 +{
80399 + rtx btsq;
80400 + rtvec argvec, constraintvec, labelvec;
80401 + int line;
80402 +
80403 + // create asm volatile("btsq $63,(%%rsp)":::)
80404 + argvec = rtvec_alloc(0);
80405 + constraintvec = rtvec_alloc(0);
80406 + labelvec = rtvec_alloc(0);
80407 + line = expand_location(RTL_LOCATION(insn)).line;
80408 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80409 + MEM_VOLATILE_P(btsq) = 1;
80410 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
80411 + emit_insn_before(btsq, insn);
80412 +}
80413 +
80414 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
80415 +static void kernexec_instrument_retaddr_or(rtx insn)
80416 +{
80417 + rtx orq;
80418 + rtvec argvec, constraintvec, labelvec;
80419 + int line;
80420 +
80421 + // create asm volatile("orq %%r10,(%%rsp)":::)
80422 + argvec = rtvec_alloc(0);
80423 + constraintvec = rtvec_alloc(0);
80424 + labelvec = rtvec_alloc(0);
80425 + line = expand_location(RTL_LOCATION(insn)).line;
80426 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80427 + MEM_VOLATILE_P(orq) = 1;
80428 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
80429 + emit_insn_before(orq, insn);
80430 +}
80431 +
80432 +/*
80433 + * find all asm level function returns and forcibly set the highest bit of the return address
80434 + */
80435 +static unsigned int execute_kernexec_retaddr(void)
80436 +{
80437 + rtx insn;
80438 +
80439 + // 1. find function returns
80440 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80441 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
80442 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
80443 + rtx body;
80444 +
80445 + // is it a retn
80446 + if (!JUMP_P(insn))
80447 + continue;
80448 + body = PATTERN(insn);
80449 + if (GET_CODE(body) == PARALLEL)
80450 + body = XVECEXP(body, 0, 0);
80451 + if (GET_CODE(body) != RETURN)
80452 + continue;
80453 + kernexec_instrument_retaddr(insn);
80454 + }
80455 +
80456 +// print_simple_rtl(stderr, get_insns());
80457 +// print_rtl(stderr, get_insns());
80458 +
80459 + return 0;
80460 +}
80461 +
80462 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80463 +{
80464 + const char * const plugin_name = plugin_info->base_name;
80465 + const int argc = plugin_info->argc;
80466 + const struct plugin_argument * const argv = plugin_info->argv;
80467 + int i;
80468 + struct register_pass_info kernexec_reload_pass_info = {
80469 + .pass = &kernexec_reload_pass.pass,
80470 + .reference_pass_name = "ssa",
80471 + .ref_pass_instance_number = 0,
80472 + .pos_op = PASS_POS_INSERT_AFTER
80473 + };
80474 + struct register_pass_info kernexec_fptr_pass_info = {
80475 + .pass = &kernexec_fptr_pass.pass,
80476 + .reference_pass_name = "ssa",
80477 + .ref_pass_instance_number = 0,
80478 + .pos_op = PASS_POS_INSERT_AFTER
80479 + };
80480 + struct register_pass_info kernexec_retaddr_pass_info = {
80481 + .pass = &kernexec_retaddr_pass.pass,
80482 + .reference_pass_name = "pro_and_epilogue",
80483 + .ref_pass_instance_number = 0,
80484 + .pos_op = PASS_POS_INSERT_AFTER
80485 + };
80486 +
80487 + if (!plugin_default_version_check(version, &gcc_version)) {
80488 + error(G_("incompatible gcc/plugin versions"));
80489 + return 1;
80490 + }
80491 +
80492 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
80493 +
80494 + if (TARGET_64BIT == 0)
80495 + return 0;
80496 +
80497 + for (i = 0; i < argc; ++i) {
80498 + if (!strcmp(argv[i].key, "method")) {
80499 + if (!argv[i].value) {
80500 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80501 + continue;
80502 + }
80503 + if (!strcmp(argv[i].value, "bts")) {
80504 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
80505 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
80506 + } else if (!strcmp(argv[i].value, "or")) {
80507 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
80508 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
80509 + fix_register("r10", 1, 1);
80510 + } else
80511 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80512 + continue;
80513 + }
80514 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80515 + }
80516 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
80517 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
80518 +
80519 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
80520 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
80521 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
80522 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
80523 +
80524 + return 0;
80525 +}
80526 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
80527 new file mode 100644
80528 index 0000000..ea79948
80529 --- /dev/null
80530 +++ b/tools/gcc/stackleak_plugin.c
80531 @@ -0,0 +1,326 @@
80532 +/*
80533 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80534 + * Licensed under the GPL v2
80535 + *
80536 + * Note: the choice of the license means that the compilation process is
80537 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80538 + * but for the kernel it doesn't matter since it doesn't link against
80539 + * any of the gcc libraries
80540 + *
80541 + * gcc plugin to help implement various PaX features
80542 + *
80543 + * - track lowest stack pointer
80544 + *
80545 + * TODO:
80546 + * - initialize all local variables
80547 + *
80548 + * BUGS:
80549 + * - none known
80550 + */
80551 +#include "gcc-plugin.h"
80552 +#include "config.h"
80553 +#include "system.h"
80554 +#include "coretypes.h"
80555 +#include "tree.h"
80556 +#include "tree-pass.h"
80557 +#include "flags.h"
80558 +#include "intl.h"
80559 +#include "toplev.h"
80560 +#include "plugin.h"
80561 +//#include "expr.h" where are you...
80562 +#include "diagnostic.h"
80563 +#include "plugin-version.h"
80564 +#include "tm.h"
80565 +#include "function.h"
80566 +#include "basic-block.h"
80567 +#include "gimple.h"
80568 +#include "rtl.h"
80569 +#include "emit-rtl.h"
80570 +
80571 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80572 +
80573 +int plugin_is_GPL_compatible;
80574 +
80575 +static int track_frame_size = -1;
80576 +static const char track_function[] = "pax_track_stack";
80577 +static const char check_function[] = "pax_check_alloca";
80578 +static tree pax_check_alloca_decl;
80579 +static tree pax_track_stack_decl;
80580 +static bool init_locals;
80581 +
80582 +static struct plugin_info stackleak_plugin_info = {
80583 + .version = "201203021600",
80584 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
80585 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
80586 +};
80587 +
80588 +static bool gate_stackleak_track_stack(void);
80589 +static unsigned int execute_stackleak_tree_instrument(void);
80590 +static unsigned int execute_stackleak_final(void);
80591 +
80592 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
80593 + .pass = {
80594 + .type = GIMPLE_PASS,
80595 + .name = "stackleak_tree_instrument",
80596 + .gate = gate_stackleak_track_stack,
80597 + .execute = execute_stackleak_tree_instrument,
80598 + .sub = NULL,
80599 + .next = NULL,
80600 + .static_pass_number = 0,
80601 + .tv_id = TV_NONE,
80602 + .properties_required = PROP_gimple_leh | PROP_cfg,
80603 + .properties_provided = 0,
80604 + .properties_destroyed = 0,
80605 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
80606 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
80607 + }
80608 +};
80609 +
80610 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
80611 + .pass = {
80612 + .type = RTL_PASS,
80613 + .name = "stackleak_final",
80614 + .gate = gate_stackleak_track_stack,
80615 + .execute = execute_stackleak_final,
80616 + .sub = NULL,
80617 + .next = NULL,
80618 + .static_pass_number = 0,
80619 + .tv_id = TV_NONE,
80620 + .properties_required = 0,
80621 + .properties_provided = 0,
80622 + .properties_destroyed = 0,
80623 + .todo_flags_start = 0,
80624 + .todo_flags_finish = TODO_dump_func
80625 + }
80626 +};
80627 +
80628 +static bool gate_stackleak_track_stack(void)
80629 +{
80630 + return track_frame_size >= 0;
80631 +}
80632 +
80633 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
80634 +{
80635 + gimple check_alloca;
80636 + tree alloca_size;
80637 +
80638 + // insert call to void pax_check_alloca(unsigned long size)
80639 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
80640 + check_alloca = gimple_build_call(pax_check_alloca_decl, 1, alloca_size);
80641 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
80642 +}
80643 +
80644 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
80645 +{
80646 + gimple track_stack;
80647 +
80648 + // insert call to void pax_track_stack(void)
80649 + track_stack = gimple_build_call(pax_track_stack_decl, 0);
80650 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
80651 +}
80652 +
80653 +#if BUILDING_GCC_VERSION == 4005
80654 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
80655 +{
80656 + tree fndecl;
80657 +
80658 + if (!is_gimple_call(stmt))
80659 + return false;
80660 + fndecl = gimple_call_fndecl(stmt);
80661 + if (!fndecl)
80662 + return false;
80663 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
80664 + return false;
80665 +// print_node(stderr, "pax", fndecl, 4);
80666 + return DECL_FUNCTION_CODE(fndecl) == code;
80667 +}
80668 +#endif
80669 +
80670 +static bool is_alloca(gimple stmt)
80671 +{
80672 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
80673 + return true;
80674 +
80675 +#if BUILDING_GCC_VERSION >= 4007
80676 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
80677 + return true;
80678 +#endif
80679 +
80680 + return false;
80681 +}
80682 +
80683 +static unsigned int execute_stackleak_tree_instrument(void)
80684 +{
80685 + basic_block bb, entry_bb;
80686 + bool prologue_instrumented = false, is_leaf = true;
80687 +
80688 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
80689 +
80690 + // 1. loop through BBs and GIMPLE statements
80691 + FOR_EACH_BB(bb) {
80692 + gimple_stmt_iterator gsi;
80693 +
80694 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80695 + gimple stmt;
80696 +
80697 + stmt = gsi_stmt(gsi);
80698 +
80699 + if (is_gimple_call(stmt))
80700 + is_leaf = false;
80701 +
80702 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
80703 + if (!is_alloca(stmt))
80704 + continue;
80705 +
80706 + // 2. insert stack overflow check before each __builtin_alloca call
80707 + stackleak_check_alloca(&gsi);
80708 +
80709 + // 3. insert track call after each __builtin_alloca call
80710 + stackleak_add_instrumentation(&gsi);
80711 + if (bb == entry_bb)
80712 + prologue_instrumented = true;
80713 + }
80714 + }
80715 +
80716 + // special case for some bad linux code: taking the address of static inline functions will materialize them
80717 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
80718 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
80719 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
80720 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
80721 + return 0;
80722 +
80723 + // 4. insert track call at the beginning
80724 + if (!prologue_instrumented) {
80725 + gimple_stmt_iterator gsi;
80726 +
80727 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
80728 + if (dom_info_available_p(CDI_DOMINATORS))
80729 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
80730 + gsi = gsi_start_bb(bb);
80731 + stackleak_add_instrumentation(&gsi);
80732 + }
80733 +
80734 + return 0;
80735 +}
80736 +
80737 +static unsigned int execute_stackleak_final(void)
80738 +{
80739 + rtx insn;
80740 +
80741 + if (cfun->calls_alloca)
80742 + return 0;
80743 +
80744 + // keep calls only if function frame is big enough
80745 + if (get_frame_size() >= track_frame_size)
80746 + return 0;
80747 +
80748 + // 1. find pax_track_stack calls
80749 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80750 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
80751 + rtx body;
80752 +
80753 + if (!CALL_P(insn))
80754 + continue;
80755 + body = PATTERN(insn);
80756 + if (GET_CODE(body) != CALL)
80757 + continue;
80758 + body = XEXP(body, 0);
80759 + if (GET_CODE(body) != MEM)
80760 + continue;
80761 + body = XEXP(body, 0);
80762 + if (GET_CODE(body) != SYMBOL_REF)
80763 + continue;
80764 + if (strcmp(XSTR(body, 0), track_function))
80765 + continue;
80766 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
80767 + // 2. delete call
80768 + insn = delete_insn_and_edges(insn);
80769 +#if BUILDING_GCC_VERSION >= 4007
80770 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
80771 + insn = delete_insn_and_edges(insn);
80772 +#endif
80773 + }
80774 +
80775 +// print_simple_rtl(stderr, get_insns());
80776 +// print_rtl(stderr, get_insns());
80777 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
80778 +
80779 + return 0;
80780 +}
80781 +
80782 +static void stackleak_start_unit(void *gcc_data, void *user_data)
80783 +{
80784 + tree fntype;
80785 +
80786 + // declare void pax_check_alloca(unsigned long size)
80787 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
80788 + pax_check_alloca_decl = build_fn_decl(check_function, fntype);
80789 + DECL_ASSEMBLER_NAME(pax_check_alloca_decl); // for LTO
80790 + TREE_PUBLIC(pax_check_alloca_decl) = 1;
80791 + DECL_EXTERNAL(pax_check_alloca_decl) = 1;
80792 + DECL_ARTIFICIAL(pax_check_alloca_decl) = 1;
80793 +
80794 + // declare void pax_track_stack(void)
80795 + fntype = build_function_type_list(void_type_node, NULL_TREE);
80796 + pax_track_stack_decl = build_fn_decl(track_function, fntype);
80797 + DECL_ASSEMBLER_NAME(pax_track_stack_decl); // for LTO
80798 + TREE_PUBLIC(pax_track_stack_decl) = 1;
80799 + DECL_EXTERNAL(pax_track_stack_decl) = 1;
80800 + DECL_ARTIFICIAL(pax_track_stack_decl) = 1;
80801 +}
80802 +
80803 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80804 +{
80805 + const char * const plugin_name = plugin_info->base_name;
80806 + const int argc = plugin_info->argc;
80807 + const struct plugin_argument * const argv = plugin_info->argv;
80808 + int i;
80809 + struct register_pass_info stackleak_tree_instrument_pass_info = {
80810 + .pass = &stackleak_tree_instrument_pass.pass,
80811 +// .reference_pass_name = "tree_profile",
80812 + .reference_pass_name = "optimized",
80813 + .ref_pass_instance_number = 0,
80814 + .pos_op = PASS_POS_INSERT_BEFORE
80815 + };
80816 + struct register_pass_info stackleak_final_pass_info = {
80817 + .pass = &stackleak_final_rtl_opt_pass.pass,
80818 + .reference_pass_name = "final",
80819 + .ref_pass_instance_number = 0,
80820 + .pos_op = PASS_POS_INSERT_BEFORE
80821 + };
80822 +
80823 + if (!plugin_default_version_check(version, &gcc_version)) {
80824 + error(G_("incompatible gcc/plugin versions"));
80825 + return 1;
80826 + }
80827 +
80828 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
80829 +
80830 + for (i = 0; i < argc; ++i) {
80831 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
80832 + if (!argv[i].value) {
80833 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80834 + continue;
80835 + }
80836 + track_frame_size = atoi(argv[i].value);
80837 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
80838 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80839 + continue;
80840 + }
80841 + if (!strcmp(argv[i].key, "initialize-locals")) {
80842 + if (argv[i].value) {
80843 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80844 + continue;
80845 + }
80846 + init_locals = true;
80847 + continue;
80848 + }
80849 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80850 + }
80851 +
80852 + register_callback(plugin_name, PLUGIN_START_UNIT, &stackleak_start_unit, NULL);
80853 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
80854 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
80855 +
80856 + return 0;
80857 +}
80858 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
80859 index 6789d78..4afd019 100644
80860 --- a/tools/perf/util/include/asm/alternative-asm.h
80861 +++ b/tools/perf/util/include/asm/alternative-asm.h
80862 @@ -5,4 +5,7 @@
80863
80864 #define altinstruction_entry #
80865
80866 + .macro pax_force_retaddr rip=0, reload=0
80867 + .endm
80868 +
80869 #endif
80870 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
80871 index af0f22f..9a7d479 100644
80872 --- a/usr/gen_init_cpio.c
80873 +++ b/usr/gen_init_cpio.c
80874 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
80875 int retval;
80876 int rc = -1;
80877 int namesize;
80878 - int i;
80879 + unsigned int i;
80880
80881 mode |= S_IFREG;
80882
80883 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
80884 *env_var = *expanded = '\0';
80885 strncat(env_var, start + 2, end - start - 2);
80886 strncat(expanded, new_location, start - new_location);
80887 - strncat(expanded, getenv(env_var), PATH_MAX);
80888 - strncat(expanded, end + 1, PATH_MAX);
80889 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
80890 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
80891 strncpy(new_location, expanded, PATH_MAX);
80892 + new_location[PATH_MAX] = 0;
80893 } else
80894 break;
80895 }
80896 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
80897 index d9cfb78..4f27c10 100644
80898 --- a/virt/kvm/kvm_main.c
80899 +++ b/virt/kvm/kvm_main.c
80900 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
80901
80902 static cpumask_var_t cpus_hardware_enabled;
80903 static int kvm_usage_count = 0;
80904 -static atomic_t hardware_enable_failed;
80905 +static atomic_unchecked_t hardware_enable_failed;
80906
80907 struct kmem_cache *kvm_vcpu_cache;
80908 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
80909 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
80910
80911 if (r) {
80912 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
80913 - atomic_inc(&hardware_enable_failed);
80914 + atomic_inc_unchecked(&hardware_enable_failed);
80915 printk(KERN_INFO "kvm: enabling virtualization on "
80916 "CPU%d failed\n", cpu);
80917 }
80918 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
80919
80920 kvm_usage_count++;
80921 if (kvm_usage_count == 1) {
80922 - atomic_set(&hardware_enable_failed, 0);
80923 + atomic_set_unchecked(&hardware_enable_failed, 0);
80924 on_each_cpu(hardware_enable_nolock, NULL, 1);
80925
80926 - if (atomic_read(&hardware_enable_failed)) {
80927 + if (atomic_read_unchecked(&hardware_enable_failed)) {
80928 hardware_disable_all_nolock();
80929 r = -EBUSY;
80930 }
80931 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
80932 kvm_arch_vcpu_put(vcpu);
80933 }
80934
80935 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80936 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80937 struct module *module)
80938 {
80939 int r;
80940 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80941 if (!vcpu_align)
80942 vcpu_align = __alignof__(struct kvm_vcpu);
80943 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
80944 - 0, NULL);
80945 + SLAB_USERCOPY, NULL);
80946 if (!kvm_vcpu_cache) {
80947 r = -ENOMEM;
80948 goto out_free_3;
80949 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
80950 if (r)
80951 goto out_free;
80952
80953 - kvm_chardev_ops.owner = module;
80954 - kvm_vm_fops.owner = module;
80955 - kvm_vcpu_fops.owner = module;
80956 + pax_open_kernel();
80957 + *(void **)&kvm_chardev_ops.owner = module;
80958 + *(void **)&kvm_vm_fops.owner = module;
80959 + *(void **)&kvm_vcpu_fops.owner = module;
80960 + pax_close_kernel();
80961
80962 r = misc_register(&kvm_dev);
80963 if (r) {