]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.2.11-201203141956.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.2.11-201203141956.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..6af9546 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -116,9 +126,11 @@ devlist.h*
67 dnotify_test
68 docproc
69 dslm
70 +dtc-lexer.lex.c
71 elf2ecoff
72 elfconfig.h*
73 evergreen_reg_safe.h
74 +exception_policy.conf
75 fixdep
76 flask.h
77 fore200e_mkfirm
78 @@ -126,12 +138,15 @@ fore200e_pca_fw.c*
79 gconf
80 gconf.glade.h
81 gen-devlist
82 +gen-kdb_cmds.c
83 gen_crc32table
84 gen_init_cpio
85 generated
86 genheaders
87 genksyms
88 *_gray256.c
89 +hash
90 +hid-example
91 hpet_example
92 hugepage-mmap
93 hugepage-shm
94 @@ -146,7 +161,7 @@ int32.c
95 int4.c
96 int8.c
97 kallsyms
98 -kconfig
99 +kern_constants.h
100 keywords.c
101 ksym.c*
102 ksym.h*
103 @@ -154,7 +169,7 @@ kxgettext
104 lkc_defs.h
105 lex.c
106 lex.*.c
107 -linux
108 +lib1funcs.S
109 logo_*.c
110 logo_*_clut224.c
111 logo_*_mono.c
112 @@ -166,14 +181,15 @@ machtypes.h
113 map
114 map_hugetlb
115 maui_boot.h
116 -media
117 mconf
118 +mdp
119 miboot*
120 mk_elfconfig
121 mkboot
122 mkbugboot
123 mkcpustr
124 mkdep
125 +mkpiggy
126 mkprep
127 mkregtable
128 mktables
129 @@ -209,6 +225,7 @@ r300_reg_safe.h
130 r420_reg_safe.h
131 r600_reg_safe.h
132 recordmcount
133 +regdb.c
134 relocs
135 rlim_names.h
136 rn50_reg_safe.h
137 @@ -219,6 +236,7 @@ setup
138 setup.bin
139 setup.elf
140 sImage
141 +slabinfo
142 sm_tbl*
143 split-include
144 syscalltab.h
145 @@ -229,6 +247,7 @@ tftpboot.img
146 timeconst.h
147 times.h*
148 trix_boot.h
149 +user_constants.h
150 utsrelease.h*
151 vdso-syms.lds
152 vdso.lds
153 @@ -246,7 +265,9 @@ vmlinux
154 vmlinux-*
155 vmlinux.aout
156 vmlinux.bin.all
157 +vmlinux.bin.bz2
158 vmlinux.lds
159 +vmlinux.relocs
160 vmlinuz
161 voffset.h
162 vsyscall.lds
163 @@ -254,9 +275,11 @@ vsyscall_32.lds
164 wanxlfw.inc
165 uImage
166 unifdef
167 +utsrelease.h
168 wakeup.bin
169 wakeup.elf
170 wakeup.lds
171 zImage*
172 zconf.hash.c
173 +zconf.lex.c
174 zoffset.h
175 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
176 index 81c287f..d456d02 100644
177 --- a/Documentation/kernel-parameters.txt
178 +++ b/Documentation/kernel-parameters.txt
179 @@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
180 the specified number of seconds. This is to be used if
181 your oopses keep scrolling off the screen.
182
183 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
184 + virtualization environments that don't cope well with the
185 + expand down segment used by UDEREF on X86-32 or the frequent
186 + page table updates on X86-64.
187 +
188 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
189 +
190 pcbit= [HW,ISDN]
191
192 pcd. [PARIDE]
193 diff --git a/Makefile b/Makefile
194 index 4b76371..53aa79c 100644
195 --- a/Makefile
196 +++ b/Makefile
197 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
198
199 HOSTCC = gcc
200 HOSTCXX = g++
201 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
202 -HOSTCXXFLAGS = -O2
203 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
204 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
205 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
206
207 # Decide whether to build built-in, modular, or both.
208 # Normally, just do built-in.
209 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
210 # Rules shared between *config targets and build targets
211
212 # Basic helpers built in scripts/
213 -PHONY += scripts_basic
214 -scripts_basic:
215 +PHONY += scripts_basic gcc-plugins
216 +scripts_basic: gcc-plugins
217 $(Q)$(MAKE) $(build)=scripts/basic
218 $(Q)rm -f .tmp_quiet_recordmcount
219
220 @@ -564,6 +565,50 @@ else
221 KBUILD_CFLAGS += -O2
222 endif
223
224 +ifndef DISABLE_PAX_PLUGINS
225 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
226 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
227 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
228 +endif
229 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
230 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
231 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
232 +endif
233 +ifdef CONFIG_KALLOCSTAT_PLUGIN
234 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
235 +endif
236 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
237 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
238 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
239 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
240 +endif
241 +ifdef CONFIG_CHECKER_PLUGIN
242 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
243 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
244 +endif
245 +endif
246 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
247 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
248 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
249 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
250 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
251 +ifeq ($(KBUILD_EXTMOD),)
252 +gcc-plugins:
253 + $(Q)$(MAKE) $(build)=tools/gcc
254 +else
255 +gcc-plugins: ;
256 +endif
257 +else
258 +gcc-plugins:
259 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
260 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
261 +else
262 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
263 +endif
264 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
265 +endif
266 +endif
267 +
268 include $(srctree)/arch/$(SRCARCH)/Makefile
269
270 ifneq ($(CONFIG_FRAME_WARN),0)
271 @@ -708,7 +753,7 @@ export mod_strip_cmd
272
273
274 ifeq ($(KBUILD_EXTMOD),)
275 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
276 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
277
278 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
279 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
280 @@ -932,6 +977,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
281
282 # The actual objects are generated when descending,
283 # make sure no implicit rule kicks in
284 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
285 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
286 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
287
288 # Handle descending into subdirectories listed in $(vmlinux-dirs)
289 @@ -941,7 +988,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
290 # Error messages still appears in the original language
291
292 PHONY += $(vmlinux-dirs)
293 -$(vmlinux-dirs): prepare scripts
294 +$(vmlinux-dirs): gcc-plugins prepare scripts
295 $(Q)$(MAKE) $(build)=$@
296
297 # Store (new) KERNELRELASE string in include/config/kernel.release
298 @@ -985,6 +1032,7 @@ prepare0: archprepare FORCE
299 $(Q)$(MAKE) $(build)=.
300
301 # All the preparing..
302 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
303 prepare: prepare0
304
305 # Generate some files
306 @@ -1086,6 +1134,8 @@ all: modules
307 # using awk while concatenating to the final file.
308
309 PHONY += modules
310 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
311 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
312 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
313 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
314 @$(kecho) ' Building modules, stage 2.';
315 @@ -1101,7 +1151,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
316
317 # Target to prepare building external modules
318 PHONY += modules_prepare
319 -modules_prepare: prepare scripts
320 +modules_prepare: gcc-plugins prepare scripts
321
322 # Target to install modules
323 PHONY += modules_install
324 @@ -1198,6 +1248,7 @@ distclean: mrproper
325 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
326 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
327 -o -name '.*.rej' \
328 + -o -name '.*.rej' -o -name '*.so' \
329 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
330 -type f -print | xargs rm -f
331
332 @@ -1358,6 +1409,8 @@ PHONY += $(module-dirs) modules
333 $(module-dirs): crmodverdir $(objtree)/Module.symvers
334 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
335
336 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
337 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
338 modules: $(module-dirs)
339 @$(kecho) ' Building modules, stage 2.';
340 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
341 @@ -1484,17 +1537,21 @@ else
342 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
343 endif
344
345 -%.s: %.c prepare scripts FORCE
346 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
347 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
348 +%.s: %.c gcc-plugins prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 %.i: %.c prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352 -%.o: %.c prepare scripts FORCE
353 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
354 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
355 +%.o: %.c gcc-plugins prepare scripts FORCE
356 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
357 %.lst: %.c prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359 -%.s: %.S prepare scripts FORCE
360 +%.s: %.S gcc-plugins prepare scripts FORCE
361 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
362 -%.o: %.S prepare scripts FORCE
363 +%.o: %.S gcc-plugins prepare scripts FORCE
364 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
365 %.symtypes: %.c prepare scripts FORCE
366 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
367 @@ -1504,11 +1561,15 @@ endif
368 $(cmd_crmodverdir)
369 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
370 $(build)=$(build-dir)
371 -%/: prepare scripts FORCE
372 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
373 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
374 +%/: gcc-plugins prepare scripts FORCE
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir)
378 -%.ko: prepare scripts FORCE
379 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
380 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
381 +%.ko: gcc-plugins prepare scripts FORCE
382 $(cmd_crmodverdir)
383 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
384 $(build)=$(build-dir) $(@:.ko=.o)
385 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
386 index 640f909..48b6597 100644
387 --- a/arch/alpha/include/asm/atomic.h
388 +++ b/arch/alpha/include/asm/atomic.h
389 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
390 #define atomic_dec(v) atomic_sub(1,(v))
391 #define atomic64_dec(v) atomic64_sub(1,(v))
392
393 +#define atomic64_read_unchecked(v) atomic64_read(v)
394 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
395 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
396 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
397 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
398 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
399 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
400 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
401 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
402 +
403 #define smp_mb__before_atomic_dec() smp_mb()
404 #define smp_mb__after_atomic_dec() smp_mb()
405 #define smp_mb__before_atomic_inc() smp_mb()
406 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
407 index ad368a9..fbe0f25 100644
408 --- a/arch/alpha/include/asm/cache.h
409 +++ b/arch/alpha/include/asm/cache.h
410 @@ -4,19 +4,19 @@
411 #ifndef __ARCH_ALPHA_CACHE_H
412 #define __ARCH_ALPHA_CACHE_H
413
414 +#include <linux/const.h>
415
416 /* Bytes per L1 (data) cache line. */
417 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
418 -# define L1_CACHE_BYTES 64
419 # define L1_CACHE_SHIFT 6
420 #else
421 /* Both EV4 and EV5 are write-through, read-allocate,
422 direct-mapped, physical.
423 */
424 -# define L1_CACHE_BYTES 32
425 # define L1_CACHE_SHIFT 5
426 #endif
427
428 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
429 #define SMP_CACHE_BYTES L1_CACHE_BYTES
430
431 #endif
432 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
433 index da5449e..7418343 100644
434 --- a/arch/alpha/include/asm/elf.h
435 +++ b/arch/alpha/include/asm/elf.h
436 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
437
438 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
439
440 +#ifdef CONFIG_PAX_ASLR
441 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
442 +
443 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
444 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
445 +#endif
446 +
447 /* $0 is set by ld.so to a pointer to a function which might be
448 registered using atexit. This provides a mean for the dynamic
449 linker to call DT_FINI functions for shared libraries that have
450 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
451 index de98a73..bd4f1f8 100644
452 --- a/arch/alpha/include/asm/pgtable.h
453 +++ b/arch/alpha/include/asm/pgtable.h
454 @@ -101,6 +101,17 @@ struct vm_area_struct;
455 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
456 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
457 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
458 +
459 +#ifdef CONFIG_PAX_PAGEEXEC
460 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
461 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
462 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
463 +#else
464 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
465 +# define PAGE_COPY_NOEXEC PAGE_COPY
466 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
467 +#endif
468 +
469 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
470
471 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
472 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
473 index 2fd00b7..cfd5069 100644
474 --- a/arch/alpha/kernel/module.c
475 +++ b/arch/alpha/kernel/module.c
476 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
477
478 /* The small sections were sorted to the end of the segment.
479 The following should definitely cover them. */
480 - gp = (u64)me->module_core + me->core_size - 0x8000;
481 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
482 got = sechdrs[me->arch.gotsecindex].sh_addr;
483
484 for (i = 0; i < n; i++) {
485 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
486 index 01e8715..be0e80f 100644
487 --- a/arch/alpha/kernel/osf_sys.c
488 +++ b/arch/alpha/kernel/osf_sys.c
489 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
490 /* At this point: (!vma || addr < vma->vm_end). */
491 if (limit - len < addr)
492 return -ENOMEM;
493 - if (!vma || addr + len <= vma->vm_start)
494 + if (check_heap_stack_gap(vma, addr, len))
495 return addr;
496 addr = vma->vm_end;
497 vma = vma->vm_next;
498 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
499 merely specific addresses, but regions of memory -- perhaps
500 this feature should be incorporated into all ports? */
501
502 +#ifdef CONFIG_PAX_RANDMMAP
503 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
504 +#endif
505 +
506 if (addr) {
507 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
508 if (addr != (unsigned long) -ENOMEM)
509 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
510 }
511
512 /* Next, try allocating at TASK_UNMAPPED_BASE. */
513 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
514 - len, limit);
515 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
516 +
517 if (addr != (unsigned long) -ENOMEM)
518 return addr;
519
520 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
521 index fadd5f8..904e73a 100644
522 --- a/arch/alpha/mm/fault.c
523 +++ b/arch/alpha/mm/fault.c
524 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
525 __reload_thread(pcb);
526 }
527
528 +#ifdef CONFIG_PAX_PAGEEXEC
529 +/*
530 + * PaX: decide what to do with offenders (regs->pc = fault address)
531 + *
532 + * returns 1 when task should be killed
533 + * 2 when patched PLT trampoline was detected
534 + * 3 when unpatched PLT trampoline was detected
535 + */
536 +static int pax_handle_fetch_fault(struct pt_regs *regs)
537 +{
538 +
539 +#ifdef CONFIG_PAX_EMUPLT
540 + int err;
541 +
542 + do { /* PaX: patched PLT emulation #1 */
543 + unsigned int ldah, ldq, jmp;
544 +
545 + err = get_user(ldah, (unsigned int *)regs->pc);
546 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
547 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
548 +
549 + if (err)
550 + break;
551 +
552 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
553 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
554 + jmp == 0x6BFB0000U)
555 + {
556 + unsigned long r27, addr;
557 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
558 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
559 +
560 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
561 + err = get_user(r27, (unsigned long *)addr);
562 + if (err)
563 + break;
564 +
565 + regs->r27 = r27;
566 + regs->pc = r27;
567 + return 2;
568 + }
569 + } while (0);
570 +
571 + do { /* PaX: patched PLT emulation #2 */
572 + unsigned int ldah, lda, br;
573 +
574 + err = get_user(ldah, (unsigned int *)regs->pc);
575 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
576 + err |= get_user(br, (unsigned int *)(regs->pc+8));
577 +
578 + if (err)
579 + break;
580 +
581 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
582 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
583 + (br & 0xFFE00000U) == 0xC3E00000U)
584 + {
585 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
586 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
587 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
588 +
589 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
590 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
591 + return 2;
592 + }
593 + } while (0);
594 +
595 + do { /* PaX: unpatched PLT emulation */
596 + unsigned int br;
597 +
598 + err = get_user(br, (unsigned int *)regs->pc);
599 +
600 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
601 + unsigned int br2, ldq, nop, jmp;
602 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
603 +
604 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
605 + err = get_user(br2, (unsigned int *)addr);
606 + err |= get_user(ldq, (unsigned int *)(addr+4));
607 + err |= get_user(nop, (unsigned int *)(addr+8));
608 + err |= get_user(jmp, (unsigned int *)(addr+12));
609 + err |= get_user(resolver, (unsigned long *)(addr+16));
610 +
611 + if (err)
612 + break;
613 +
614 + if (br2 == 0xC3600000U &&
615 + ldq == 0xA77B000CU &&
616 + nop == 0x47FF041FU &&
617 + jmp == 0x6B7B0000U)
618 + {
619 + regs->r28 = regs->pc+4;
620 + regs->r27 = addr+16;
621 + regs->pc = resolver;
622 + return 3;
623 + }
624 + }
625 + } while (0);
626 +#endif
627 +
628 + return 1;
629 +}
630 +
631 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
632 +{
633 + unsigned long i;
634 +
635 + printk(KERN_ERR "PAX: bytes at PC: ");
636 + for (i = 0; i < 5; i++) {
637 + unsigned int c;
638 + if (get_user(c, (unsigned int *)pc+i))
639 + printk(KERN_CONT "???????? ");
640 + else
641 + printk(KERN_CONT "%08x ", c);
642 + }
643 + printk("\n");
644 +}
645 +#endif
646
647 /*
648 * This routine handles page faults. It determines the address,
649 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
650 good_area:
651 si_code = SEGV_ACCERR;
652 if (cause < 0) {
653 - if (!(vma->vm_flags & VM_EXEC))
654 + if (!(vma->vm_flags & VM_EXEC)) {
655 +
656 +#ifdef CONFIG_PAX_PAGEEXEC
657 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
658 + goto bad_area;
659 +
660 + up_read(&mm->mmap_sem);
661 + switch (pax_handle_fetch_fault(regs)) {
662 +
663 +#ifdef CONFIG_PAX_EMUPLT
664 + case 2:
665 + case 3:
666 + return;
667 +#endif
668 +
669 + }
670 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
671 + do_group_exit(SIGKILL);
672 +#else
673 goto bad_area;
674 +#endif
675 +
676 + }
677 } else if (!cause) {
678 /* Allow reads even for write-only mappings */
679 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
680 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
681 index 86976d0..683de93 100644
682 --- a/arch/arm/include/asm/atomic.h
683 +++ b/arch/arm/include/asm/atomic.h
684 @@ -15,6 +15,10 @@
685 #include <linux/types.h>
686 #include <asm/system.h>
687
688 +#ifdef CONFIG_GENERIC_ATOMIC64
689 +#include <asm-generic/atomic64.h>
690 +#endif
691 +
692 #define ATOMIC_INIT(i) { (i) }
693
694 #ifdef __KERNEL__
695 @@ -25,7 +29,15 @@
696 * atomic_set() is the clrex or dummy strex done on every exception return.
697 */
698 #define atomic_read(v) (*(volatile int *)&(v)->counter)
699 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
700 +{
701 + return v->counter;
702 +}
703 #define atomic_set(v,i) (((v)->counter) = (i))
704 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
705 +{
706 + v->counter = i;
707 +}
708
709 #if __LINUX_ARM_ARCH__ >= 6
710
711 @@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
712 int result;
713
714 __asm__ __volatile__("@ atomic_add\n"
715 +"1: ldrex %1, [%3]\n"
716 +" adds %0, %1, %4\n"
717 +
718 +#ifdef CONFIG_PAX_REFCOUNT
719 +" bvc 3f\n"
720 +"2: bkpt 0xf103\n"
721 +"3:\n"
722 +#endif
723 +
724 +" strex %1, %0, [%3]\n"
725 +" teq %1, #0\n"
726 +" bne 1b"
727 +
728 +#ifdef CONFIG_PAX_REFCOUNT
729 +"\n4:\n"
730 + _ASM_EXTABLE(2b, 4b)
731 +#endif
732 +
733 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
734 + : "r" (&v->counter), "Ir" (i)
735 + : "cc");
736 +}
737 +
738 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
739 +{
740 + unsigned long tmp;
741 + int result;
742 +
743 + __asm__ __volatile__("@ atomic_add_unchecked\n"
744 "1: ldrex %0, [%3]\n"
745 " add %0, %0, %4\n"
746 " strex %1, %0, [%3]\n"
747 @@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
748 smp_mb();
749
750 __asm__ __volatile__("@ atomic_add_return\n"
751 +"1: ldrex %1, [%3]\n"
752 +" adds %0, %1, %4\n"
753 +
754 +#ifdef CONFIG_PAX_REFCOUNT
755 +" bvc 3f\n"
756 +" mov %0, %1\n"
757 +"2: bkpt 0xf103\n"
758 +"3:\n"
759 +#endif
760 +
761 +" strex %1, %0, [%3]\n"
762 +" teq %1, #0\n"
763 +" bne 1b"
764 +
765 +#ifdef CONFIG_PAX_REFCOUNT
766 +"\n4:\n"
767 + _ASM_EXTABLE(2b, 4b)
768 +#endif
769 +
770 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
771 + : "r" (&v->counter), "Ir" (i)
772 + : "cc");
773 +
774 + smp_mb();
775 +
776 + return result;
777 +}
778 +
779 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
780 +{
781 + unsigned long tmp;
782 + int result;
783 +
784 + smp_mb();
785 +
786 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
787 "1: ldrex %0, [%3]\n"
788 " add %0, %0, %4\n"
789 " strex %1, %0, [%3]\n"
790 @@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
791 int result;
792
793 __asm__ __volatile__("@ atomic_sub\n"
794 +"1: ldrex %1, [%3]\n"
795 +" subs %0, %1, %4\n"
796 +
797 +#ifdef CONFIG_PAX_REFCOUNT
798 +" bvc 3f\n"
799 +"2: bkpt 0xf103\n"
800 +"3:\n"
801 +#endif
802 +
803 +" strex %1, %0, [%3]\n"
804 +" teq %1, #0\n"
805 +" bne 1b"
806 +
807 +#ifdef CONFIG_PAX_REFCOUNT
808 +"\n4:\n"
809 + _ASM_EXTABLE(2b, 4b)
810 +#endif
811 +
812 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
813 + : "r" (&v->counter), "Ir" (i)
814 + : "cc");
815 +}
816 +
817 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
818 +{
819 + unsigned long tmp;
820 + int result;
821 +
822 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
823 "1: ldrex %0, [%3]\n"
824 " sub %0, %0, %4\n"
825 " strex %1, %0, [%3]\n"
826 @@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
827 smp_mb();
828
829 __asm__ __volatile__("@ atomic_sub_return\n"
830 -"1: ldrex %0, [%3]\n"
831 -" sub %0, %0, %4\n"
832 +"1: ldrex %1, [%3]\n"
833 +" sub %0, %1, %4\n"
834 +
835 +#ifdef CONFIG_PAX_REFCOUNT
836 +" bvc 3f\n"
837 +" mov %0, %1\n"
838 +"2: bkpt 0xf103\n"
839 +"3:\n"
840 +#endif
841 +
842 " strex %1, %0, [%3]\n"
843 " teq %1, #0\n"
844 " bne 1b"
845 +
846 +#ifdef CONFIG_PAX_REFCOUNT
847 +"\n4:\n"
848 + _ASM_EXTABLE(2b, 4b)
849 +#endif
850 +
851 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
852 : "r" (&v->counter), "Ir" (i)
853 : "cc");
854 @@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
855 return oldval;
856 }
857
858 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
859 +{
860 + unsigned long oldval, res;
861 +
862 + smp_mb();
863 +
864 + do {
865 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
866 + "ldrex %1, [%3]\n"
867 + "mov %0, #0\n"
868 + "teq %1, %4\n"
869 + "strexeq %0, %5, [%3]\n"
870 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
871 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
872 + : "cc");
873 + } while (res);
874 +
875 + smp_mb();
876 +
877 + return oldval;
878 +}
879 +
880 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
881 {
882 unsigned long tmp, tmp2;
883 @@ -207,6 +349,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
884 #endif /* __LINUX_ARM_ARCH__ */
885
886 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
887 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
888 +{
889 + return xchg(&v->counter, new);
890 +}
891
892 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
893 {
894 @@ -219,11 +365,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
895 }
896
897 #define atomic_inc(v) atomic_add(1, v)
898 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
899 +{
900 + atomic_add_unchecked(1, v);
901 +}
902 #define atomic_dec(v) atomic_sub(1, v)
903 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
904 +{
905 + atomic_sub_unchecked(1, v);
906 +}
907
908 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
909 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
910 +{
911 + return atomic_add_return_unchecked(1, v) == 0;
912 +}
913 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
914 #define atomic_inc_return(v) (atomic_add_return(1, v))
915 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
916 +{
917 + return atomic_add_return_unchecked(1, v);
918 +}
919 #define atomic_dec_return(v) (atomic_sub_return(1, v))
920 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
921
922 @@ -239,6 +401,14 @@ typedef struct {
923 u64 __aligned(8) counter;
924 } atomic64_t;
925
926 +#ifdef CONFIG_PAX_REFCOUNT
927 +typedef struct {
928 + u64 __aligned(8) counter;
929 +} atomic64_unchecked_t;
930 +#else
931 +typedef atomic64_t atomic64_unchecked_t;
932 +#endif
933 +
934 #define ATOMIC64_INIT(i) { (i) }
935
936 static inline u64 atomic64_read(atomic64_t *v)
937 @@ -254,6 +424,19 @@ static inline u64 atomic64_read(atomic64_t *v)
938 return result;
939 }
940
941 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
942 +{
943 + u64 result;
944 +
945 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
946 +" ldrexd %0, %H0, [%1]"
947 + : "=&r" (result)
948 + : "r" (&v->counter), "Qo" (v->counter)
949 + );
950 +
951 + return result;
952 +}
953 +
954 static inline void atomic64_set(atomic64_t *v, u64 i)
955 {
956 u64 tmp;
957 @@ -268,6 +451,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
958 : "cc");
959 }
960
961 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
962 +{
963 + u64 tmp;
964 +
965 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
966 +"1: ldrexd %0, %H0, [%2]\n"
967 +" strexd %0, %3, %H3, [%2]\n"
968 +" teq %0, #0\n"
969 +" bne 1b"
970 + : "=&r" (tmp), "=Qo" (v->counter)
971 + : "r" (&v->counter), "r" (i)
972 + : "cc");
973 +}
974 +
975 static inline void atomic64_add(u64 i, atomic64_t *v)
976 {
977 u64 result;
978 @@ -276,6 +473,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
979 __asm__ __volatile__("@ atomic64_add\n"
980 "1: ldrexd %0, %H0, [%3]\n"
981 " adds %0, %0, %4\n"
982 +" adcs %H0, %H0, %H4\n"
983 +
984 +#ifdef CONFIG_PAX_REFCOUNT
985 +" bvc 3f\n"
986 +"2: bkpt 0xf103\n"
987 +"3:\n"
988 +#endif
989 +
990 +" strexd %1, %0, %H0, [%3]\n"
991 +" teq %1, #0\n"
992 +" bne 1b"
993 +
994 +#ifdef CONFIG_PAX_REFCOUNT
995 +"\n4:\n"
996 + _ASM_EXTABLE(2b, 4b)
997 +#endif
998 +
999 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1000 + : "r" (&v->counter), "r" (i)
1001 + : "cc");
1002 +}
1003 +
1004 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1005 +{
1006 + u64 result;
1007 + unsigned long tmp;
1008 +
1009 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1010 +"1: ldrexd %0, %H0, [%3]\n"
1011 +" adds %0, %0, %4\n"
1012 " adc %H0, %H0, %H4\n"
1013 " strexd %1, %0, %H0, [%3]\n"
1014 " teq %1, #0\n"
1015 @@ -287,12 +514,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1016
1017 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1018 {
1019 - u64 result;
1020 - unsigned long tmp;
1021 + u64 result, tmp;
1022
1023 smp_mb();
1024
1025 __asm__ __volatile__("@ atomic64_add_return\n"
1026 +"1: ldrexd %1, %H1, [%3]\n"
1027 +" adds %0, %1, %4\n"
1028 +" adcs %H0, %H1, %H4\n"
1029 +
1030 +#ifdef CONFIG_PAX_REFCOUNT
1031 +" bvc 3f\n"
1032 +" mov %0, %1\n"
1033 +" mov %H0, %H1\n"
1034 +"2: bkpt 0xf103\n"
1035 +"3:\n"
1036 +#endif
1037 +
1038 +" strexd %1, %0, %H0, [%3]\n"
1039 +" teq %1, #0\n"
1040 +" bne 1b"
1041 +
1042 +#ifdef CONFIG_PAX_REFCOUNT
1043 +"\n4:\n"
1044 + _ASM_EXTABLE(2b, 4b)
1045 +#endif
1046 +
1047 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1048 + : "r" (&v->counter), "r" (i)
1049 + : "cc");
1050 +
1051 + smp_mb();
1052 +
1053 + return result;
1054 +}
1055 +
1056 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1057 +{
1058 + u64 result;
1059 + unsigned long tmp;
1060 +
1061 + smp_mb();
1062 +
1063 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1064 "1: ldrexd %0, %H0, [%3]\n"
1065 " adds %0, %0, %4\n"
1066 " adc %H0, %H0, %H4\n"
1067 @@ -316,6 +580,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1068 __asm__ __volatile__("@ atomic64_sub\n"
1069 "1: ldrexd %0, %H0, [%3]\n"
1070 " subs %0, %0, %4\n"
1071 +" sbcs %H0, %H0, %H4\n"
1072 +
1073 +#ifdef CONFIG_PAX_REFCOUNT
1074 +" bvc 3f\n"
1075 +"2: bkpt 0xf103\n"
1076 +"3:\n"
1077 +#endif
1078 +
1079 +" strexd %1, %0, %H0, [%3]\n"
1080 +" teq %1, #0\n"
1081 +" bne 1b"
1082 +
1083 +#ifdef CONFIG_PAX_REFCOUNT
1084 +"\n4:\n"
1085 + _ASM_EXTABLE(2b, 4b)
1086 +#endif
1087 +
1088 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1089 + : "r" (&v->counter), "r" (i)
1090 + : "cc");
1091 +}
1092 +
1093 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1094 +{
1095 + u64 result;
1096 + unsigned long tmp;
1097 +
1098 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1099 +"1: ldrexd %0, %H0, [%3]\n"
1100 +" subs %0, %0, %4\n"
1101 " sbc %H0, %H0, %H4\n"
1102 " strexd %1, %0, %H0, [%3]\n"
1103 " teq %1, #0\n"
1104 @@ -327,18 +621,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1105
1106 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1107 {
1108 - u64 result;
1109 - unsigned long tmp;
1110 + u64 result, tmp;
1111
1112 smp_mb();
1113
1114 __asm__ __volatile__("@ atomic64_sub_return\n"
1115 -"1: ldrexd %0, %H0, [%3]\n"
1116 -" subs %0, %0, %4\n"
1117 -" sbc %H0, %H0, %H4\n"
1118 +"1: ldrexd %1, %H1, [%3]\n"
1119 +" subs %0, %1, %4\n"
1120 +" sbc %H0, %H1, %H4\n"
1121 +
1122 +#ifdef CONFIG_PAX_REFCOUNT
1123 +" bvc 3f\n"
1124 +" mov %0, %1\n"
1125 +" mov %H0, %H1\n"
1126 +"2: bkpt 0xf103\n"
1127 +"3:\n"
1128 +#endif
1129 +
1130 " strexd %1, %0, %H0, [%3]\n"
1131 " teq %1, #0\n"
1132 " bne 1b"
1133 +
1134 +#ifdef CONFIG_PAX_REFCOUNT
1135 +"\n4:\n"
1136 + _ASM_EXTABLE(2b, 4b)
1137 +#endif
1138 +
1139 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1140 : "r" (&v->counter), "r" (i)
1141 : "cc");
1142 @@ -372,6 +680,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1143 return oldval;
1144 }
1145
1146 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1147 +{
1148 + u64 oldval;
1149 + unsigned long res;
1150 +
1151 + smp_mb();
1152 +
1153 + do {
1154 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1155 + "ldrexd %1, %H1, [%3]\n"
1156 + "mov %0, #0\n"
1157 + "teq %1, %4\n"
1158 + "teqeq %H1, %H4\n"
1159 + "strexdeq %0, %5, %H5, [%3]"
1160 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1161 + : "r" (&ptr->counter), "r" (old), "r" (new)
1162 + : "cc");
1163 + } while (res);
1164 +
1165 + smp_mb();
1166 +
1167 + return oldval;
1168 +}
1169 +
1170 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1171 {
1172 u64 result;
1173 @@ -395,21 +727,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1174
1175 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1176 {
1177 - u64 result;
1178 - unsigned long tmp;
1179 + u64 result, tmp;
1180
1181 smp_mb();
1182
1183 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1184 -"1: ldrexd %0, %H0, [%3]\n"
1185 -" subs %0, %0, #1\n"
1186 -" sbc %H0, %H0, #0\n"
1187 +"1: ldrexd %1, %H1, [%3]\n"
1188 +" subs %0, %1, #1\n"
1189 +" sbc %H0, %H1, #0\n"
1190 +
1191 +#ifdef CONFIG_PAX_REFCOUNT
1192 +" bvc 3f\n"
1193 +" mov %0, %1\n"
1194 +" mov %H0, %H1\n"
1195 +"2: bkpt 0xf103\n"
1196 +"3:\n"
1197 +#endif
1198 +
1199 " teq %H0, #0\n"
1200 -" bmi 2f\n"
1201 +" bmi 4f\n"
1202 " strexd %1, %0, %H0, [%3]\n"
1203 " teq %1, #0\n"
1204 " bne 1b\n"
1205 -"2:"
1206 +"4:\n"
1207 +
1208 +#ifdef CONFIG_PAX_REFCOUNT
1209 + _ASM_EXTABLE(2b, 4b)
1210 +#endif
1211 +
1212 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1213 : "r" (&v->counter)
1214 : "cc");
1215 @@ -432,13 +777,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1216 " teq %0, %5\n"
1217 " teqeq %H0, %H5\n"
1218 " moveq %1, #0\n"
1219 -" beq 2f\n"
1220 +" beq 4f\n"
1221 " adds %0, %0, %6\n"
1222 " adc %H0, %H0, %H6\n"
1223 +
1224 +#ifdef CONFIG_PAX_REFCOUNT
1225 +" bvc 3f\n"
1226 +"2: bkpt 0xf103\n"
1227 +"3:\n"
1228 +#endif
1229 +
1230 " strexd %2, %0, %H0, [%4]\n"
1231 " teq %2, #0\n"
1232 " bne 1b\n"
1233 -"2:"
1234 +"4:\n"
1235 +
1236 +#ifdef CONFIG_PAX_REFCOUNT
1237 + _ASM_EXTABLE(2b, 4b)
1238 +#endif
1239 +
1240 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1241 : "r" (&v->counter), "r" (u), "r" (a)
1242 : "cc");
1243 @@ -451,10 +808,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1244
1245 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1246 #define atomic64_inc(v) atomic64_add(1LL, (v))
1247 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1248 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1249 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1250 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1251 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1252 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1253 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1254 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1255 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1256 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1257 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1258 index 75fe66b..2255c86 100644
1259 --- a/arch/arm/include/asm/cache.h
1260 +++ b/arch/arm/include/asm/cache.h
1261 @@ -4,8 +4,10 @@
1262 #ifndef __ASMARM_CACHE_H
1263 #define __ASMARM_CACHE_H
1264
1265 +#include <linux/const.h>
1266 +
1267 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1268 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1269 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1270
1271 /*
1272 * Memory returned by kmalloc() may be used for DMA, so we must make
1273 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1274 index d5d8d5c..ad92c96 100644
1275 --- a/arch/arm/include/asm/cacheflush.h
1276 +++ b/arch/arm/include/asm/cacheflush.h
1277 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1278 void (*dma_unmap_area)(const void *, size_t, int);
1279
1280 void (*dma_flush_range)(const void *, const void *);
1281 -};
1282 +} __no_const;
1283
1284 /*
1285 * Select the calling method
1286 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1287 index 0e9ce8d..6ef1e03 100644
1288 --- a/arch/arm/include/asm/elf.h
1289 +++ b/arch/arm/include/asm/elf.h
1290 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1291 the loader. We need to make sure that it is out of the way of the program
1292 that it will "exec", and that there is sufficient room for the brk. */
1293
1294 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1295 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1296 +
1297 +#ifdef CONFIG_PAX_ASLR
1298 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1299 +
1300 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1301 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1302 +#endif
1303
1304 /* When the program starts, a1 contains a pointer to a function to be
1305 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1306 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1307 extern void elf_set_personality(const struct elf32_hdr *);
1308 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1309
1310 -struct mm_struct;
1311 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1312 -#define arch_randomize_brk arch_randomize_brk
1313 -
1314 extern int vectors_user_mapping(void);
1315 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1316 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1317 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1318 index e51b1e8..32a3113 100644
1319 --- a/arch/arm/include/asm/kmap_types.h
1320 +++ b/arch/arm/include/asm/kmap_types.h
1321 @@ -21,6 +21,7 @@ enum km_type {
1322 KM_L1_CACHE,
1323 KM_L2_CACHE,
1324 KM_KDB,
1325 + KM_CLEARPAGE,
1326 KM_TYPE_NR
1327 };
1328
1329 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1330 index 53426c6..c7baff3 100644
1331 --- a/arch/arm/include/asm/outercache.h
1332 +++ b/arch/arm/include/asm/outercache.h
1333 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1334 #endif
1335 void (*set_debug)(unsigned long);
1336 void (*resume)(void);
1337 -};
1338 +} __no_const;
1339
1340 #ifdef CONFIG_OUTER_CACHE
1341
1342 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1343 index ca94653..6ac0d56 100644
1344 --- a/arch/arm/include/asm/page.h
1345 +++ b/arch/arm/include/asm/page.h
1346 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1347 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1348 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1349 unsigned long vaddr, struct vm_area_struct *vma);
1350 -};
1351 +} __no_const;
1352
1353 #ifdef MULTI_USER
1354 extern struct cpu_user_fns cpu_user;
1355 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1356 index 984014b..a6d914f 100644
1357 --- a/arch/arm/include/asm/system.h
1358 +++ b/arch/arm/include/asm/system.h
1359 @@ -90,6 +90,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1360
1361 #define xchg(ptr,x) \
1362 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1363 +#define xchg_unchecked(ptr,x) \
1364 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1365
1366 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1367
1368 @@ -101,7 +103,7 @@ extern int __pure cpu_architecture(void);
1369 extern void cpu_init(void);
1370
1371 void arm_machine_restart(char mode, const char *cmd);
1372 -extern void (*arm_pm_restart)(char str, const char *cmd);
1373 +extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
1374
1375 #define UDBG_UNDEFINED (1 << 0)
1376 #define UDBG_SYSCALL (1 << 1)
1377 @@ -526,6 +528,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1378
1379 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1380
1381 +#define _ASM_EXTABLE(from, to) \
1382 +" .pushsection __ex_table,\"a\"\n"\
1383 +" .align 3\n" \
1384 +" .long " #from ", " #to"\n" \
1385 +" .popsection"
1386 +
1387 +
1388 #endif /* __ASSEMBLY__ */
1389
1390 #define arch_align_stack(x) (x)
1391 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1392 index b293616..96310e5 100644
1393 --- a/arch/arm/include/asm/uaccess.h
1394 +++ b/arch/arm/include/asm/uaccess.h
1395 @@ -22,6 +22,8 @@
1396 #define VERIFY_READ 0
1397 #define VERIFY_WRITE 1
1398
1399 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1400 +
1401 /*
1402 * The exception table consists of pairs of addresses: the first is the
1403 * address of an instruction that is allowed to fault, and the second is
1404 @@ -387,8 +389,23 @@ do { \
1405
1406
1407 #ifdef CONFIG_MMU
1408 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1409 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1410 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1411 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1412 +
1413 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1414 +{
1415 + if (!__builtin_constant_p(n))
1416 + check_object_size(to, n, false);
1417 + return ___copy_from_user(to, from, n);
1418 +}
1419 +
1420 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1421 +{
1422 + if (!__builtin_constant_p(n))
1423 + check_object_size(from, n, true);
1424 + return ___copy_to_user(to, from, n);
1425 +}
1426 +
1427 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1428 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1429 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1430 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1431
1432 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1433 {
1434 + if ((long)n < 0)
1435 + return n;
1436 +
1437 if (access_ok(VERIFY_READ, from, n))
1438 n = __copy_from_user(to, from, n);
1439 else /* security hole - plug it */
1440 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1441
1442 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1443 {
1444 + if ((long)n < 0)
1445 + return n;
1446 +
1447 if (access_ok(VERIFY_WRITE, to, n))
1448 n = __copy_to_user(to, from, n);
1449 return n;
1450 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1451 index 5b0bce6..becd81c 100644
1452 --- a/arch/arm/kernel/armksyms.c
1453 +++ b/arch/arm/kernel/armksyms.c
1454 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1455 #ifdef CONFIG_MMU
1456 EXPORT_SYMBOL(copy_page);
1457
1458 -EXPORT_SYMBOL(__copy_from_user);
1459 -EXPORT_SYMBOL(__copy_to_user);
1460 +EXPORT_SYMBOL(___copy_from_user);
1461 +EXPORT_SYMBOL(___copy_to_user);
1462 EXPORT_SYMBOL(__clear_user);
1463
1464 EXPORT_SYMBOL(__get_user_1);
1465 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1466 index 3d0c6fb..9d326fa 100644
1467 --- a/arch/arm/kernel/process.c
1468 +++ b/arch/arm/kernel/process.c
1469 @@ -28,7 +28,6 @@
1470 #include <linux/tick.h>
1471 #include <linux/utsname.h>
1472 #include <linux/uaccess.h>
1473 -#include <linux/random.h>
1474 #include <linux/hw_breakpoint.h>
1475 #include <linux/cpuidle.h>
1476
1477 @@ -92,7 +91,7 @@ static int __init hlt_setup(char *__unused)
1478 __setup("nohlt", nohlt_setup);
1479 __setup("hlt", hlt_setup);
1480
1481 -void arm_machine_restart(char mode, const char *cmd)
1482 +__noreturn void arm_machine_restart(char mode, const char *cmd)
1483 {
1484 /* Disable interrupts first */
1485 local_irq_disable();
1486 @@ -134,7 +133,7 @@ void arm_machine_restart(char mode, const char *cmd)
1487 void (*pm_power_off)(void);
1488 EXPORT_SYMBOL(pm_power_off);
1489
1490 -void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
1491 +void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
1492 EXPORT_SYMBOL_GPL(arm_pm_restart);
1493
1494 static void do_nothing(void *unused)
1495 @@ -248,6 +247,7 @@ void machine_power_off(void)
1496 machine_shutdown();
1497 if (pm_power_off)
1498 pm_power_off();
1499 + BUG();
1500 }
1501
1502 void machine_restart(char *cmd)
1503 @@ -484,12 +484,6 @@ unsigned long get_wchan(struct task_struct *p)
1504 return 0;
1505 }
1506
1507 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1508 -{
1509 - unsigned long range_end = mm->brk + 0x02000000;
1510 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1511 -}
1512 -
1513 #ifdef CONFIG_MMU
1514 /*
1515 * The vectors page is always readable from user space for the
1516 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1517 index 8fc2c8f..064c150 100644
1518 --- a/arch/arm/kernel/setup.c
1519 +++ b/arch/arm/kernel/setup.c
1520 @@ -108,13 +108,13 @@ struct processor processor __read_mostly;
1521 struct cpu_tlb_fns cpu_tlb __read_mostly;
1522 #endif
1523 #ifdef MULTI_USER
1524 -struct cpu_user_fns cpu_user __read_mostly;
1525 +struct cpu_user_fns cpu_user __read_only;
1526 #endif
1527 #ifdef MULTI_CACHE
1528 -struct cpu_cache_fns cpu_cache __read_mostly;
1529 +struct cpu_cache_fns cpu_cache __read_only;
1530 #endif
1531 #ifdef CONFIG_OUTER_CACHE
1532 -struct outer_cache_fns outer_cache __read_mostly;
1533 +struct outer_cache_fns outer_cache __read_only;
1534 EXPORT_SYMBOL(outer_cache);
1535 #endif
1536
1537 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1538 index 99a5727..a3d5bb1 100644
1539 --- a/arch/arm/kernel/traps.c
1540 +++ b/arch/arm/kernel/traps.c
1541 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1542
1543 static DEFINE_RAW_SPINLOCK(die_lock);
1544
1545 +extern void gr_handle_kernel_exploit(void);
1546 +
1547 /*
1548 * This function is protected against re-entrancy.
1549 */
1550 @@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1551 panic("Fatal exception in interrupt");
1552 if (panic_on_oops)
1553 panic("Fatal exception");
1554 +
1555 + gr_handle_kernel_exploit();
1556 +
1557 if (ret != NOTIFY_STOP)
1558 do_exit(SIGSEGV);
1559 }
1560 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1561 index 66a477a..bee61d3 100644
1562 --- a/arch/arm/lib/copy_from_user.S
1563 +++ b/arch/arm/lib/copy_from_user.S
1564 @@ -16,7 +16,7 @@
1565 /*
1566 * Prototype:
1567 *
1568 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1569 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1570 *
1571 * Purpose:
1572 *
1573 @@ -84,11 +84,11 @@
1574
1575 .text
1576
1577 -ENTRY(__copy_from_user)
1578 +ENTRY(___copy_from_user)
1579
1580 #include "copy_template.S"
1581
1582 -ENDPROC(__copy_from_user)
1583 +ENDPROC(___copy_from_user)
1584
1585 .pushsection .fixup,"ax"
1586 .align 0
1587 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1588 index 6ee2f67..d1cce76 100644
1589 --- a/arch/arm/lib/copy_page.S
1590 +++ b/arch/arm/lib/copy_page.S
1591 @@ -10,6 +10,7 @@
1592 * ASM optimised string functions
1593 */
1594 #include <linux/linkage.h>
1595 +#include <linux/const.h>
1596 #include <asm/assembler.h>
1597 #include <asm/asm-offsets.h>
1598 #include <asm/cache.h>
1599 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1600 index d066df6..df28194 100644
1601 --- a/arch/arm/lib/copy_to_user.S
1602 +++ b/arch/arm/lib/copy_to_user.S
1603 @@ -16,7 +16,7 @@
1604 /*
1605 * Prototype:
1606 *
1607 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1608 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1609 *
1610 * Purpose:
1611 *
1612 @@ -88,11 +88,11 @@
1613 .text
1614
1615 ENTRY(__copy_to_user_std)
1616 -WEAK(__copy_to_user)
1617 +WEAK(___copy_to_user)
1618
1619 #include "copy_template.S"
1620
1621 -ENDPROC(__copy_to_user)
1622 +ENDPROC(___copy_to_user)
1623 ENDPROC(__copy_to_user_std)
1624
1625 .pushsection .fixup,"ax"
1626 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1627 index d0ece2a..5ae2f39 100644
1628 --- a/arch/arm/lib/uaccess.S
1629 +++ b/arch/arm/lib/uaccess.S
1630 @@ -20,7 +20,7 @@
1631
1632 #define PAGE_SHIFT 12
1633
1634 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1635 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1636 * Purpose : copy a block to user memory from kernel memory
1637 * Params : to - user memory
1638 * : from - kernel memory
1639 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
1640 sub r2, r2, ip
1641 b .Lc2u_dest_aligned
1642
1643 -ENTRY(__copy_to_user)
1644 +ENTRY(___copy_to_user)
1645 stmfd sp!, {r2, r4 - r7, lr}
1646 cmp r2, #4
1647 blt .Lc2u_not_enough
1648 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
1649 ldrgtb r3, [r1], #0
1650 USER( T(strgtb) r3, [r0], #1) @ May fault
1651 b .Lc2u_finished
1652 -ENDPROC(__copy_to_user)
1653 +ENDPROC(___copy_to_user)
1654
1655 .pushsection .fixup,"ax"
1656 .align 0
1657 9001: ldmfd sp!, {r0, r4 - r7, pc}
1658 .popsection
1659
1660 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1661 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1662 * Purpose : copy a block from user memory to kernel memory
1663 * Params : to - kernel memory
1664 * : from - user memory
1665 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
1666 sub r2, r2, ip
1667 b .Lcfu_dest_aligned
1668
1669 -ENTRY(__copy_from_user)
1670 +ENTRY(___copy_from_user)
1671 stmfd sp!, {r0, r2, r4 - r7, lr}
1672 cmp r2, #4
1673 blt .Lcfu_not_enough
1674 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
1675 USER( T(ldrgtb) r3, [r1], #1) @ May fault
1676 strgtb r3, [r0], #1
1677 b .Lcfu_finished
1678 -ENDPROC(__copy_from_user)
1679 +ENDPROC(___copy_from_user)
1680
1681 .pushsection .fixup,"ax"
1682 .align 0
1683 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1684 index 025f742..8432b08 100644
1685 --- a/arch/arm/lib/uaccess_with_memcpy.c
1686 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1687 @@ -104,7 +104,7 @@ out:
1688 }
1689
1690 unsigned long
1691 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1692 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1693 {
1694 /*
1695 * This test is stubbed out of the main function above to keep
1696 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1697 index e9d5f4a..f099699 100644
1698 --- a/arch/arm/mach-omap2/board-n8x0.c
1699 +++ b/arch/arm/mach-omap2/board-n8x0.c
1700 @@ -593,7 +593,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1701 }
1702 #endif
1703
1704 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1705 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1706 .late_init = n8x0_menelaus_late_init,
1707 };
1708
1709 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1710 index 2b2d51c..0127490 100644
1711 --- a/arch/arm/mach-ux500/mbox-db5500.c
1712 +++ b/arch/arm/mach-ux500/mbox-db5500.c
1713 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1714 return sprintf(buf, "0x%X\n", mbox_value);
1715 }
1716
1717 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1718 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1719
1720 static int mbox_show(struct seq_file *s, void *data)
1721 {
1722 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1723 index aa33949..d366075 100644
1724 --- a/arch/arm/mm/fault.c
1725 +++ b/arch/arm/mm/fault.c
1726 @@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1727 }
1728 #endif
1729
1730 +#ifdef CONFIG_PAX_PAGEEXEC
1731 + if (fsr & FSR_LNX_PF) {
1732 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1733 + do_group_exit(SIGKILL);
1734 + }
1735 +#endif
1736 +
1737 tsk->thread.address = addr;
1738 tsk->thread.error_code = fsr;
1739 tsk->thread.trap_no = 14;
1740 @@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1741 }
1742 #endif /* CONFIG_MMU */
1743
1744 +#ifdef CONFIG_PAX_PAGEEXEC
1745 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1746 +{
1747 + long i;
1748 +
1749 + printk(KERN_ERR "PAX: bytes at PC: ");
1750 + for (i = 0; i < 20; i++) {
1751 + unsigned char c;
1752 + if (get_user(c, (__force unsigned char __user *)pc+i))
1753 + printk(KERN_CONT "?? ");
1754 + else
1755 + printk(KERN_CONT "%02x ", c);
1756 + }
1757 + printk("\n");
1758 +
1759 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1760 + for (i = -1; i < 20; i++) {
1761 + unsigned long c;
1762 + if (get_user(c, (__force unsigned long __user *)sp+i))
1763 + printk(KERN_CONT "???????? ");
1764 + else
1765 + printk(KERN_CONT "%08lx ", c);
1766 + }
1767 + printk("\n");
1768 +}
1769 +#endif
1770 +
1771 /*
1772 * First Level Translation Fault Handler
1773 *
1774 @@ -628,6 +662,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1775 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1776 struct siginfo info;
1777
1778 +#ifdef CONFIG_PAX_REFCOUNT
1779 + if (fsr_fs(ifsr) == 2) {
1780 + unsigned int bkpt;
1781 +
1782 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1783 + current->thread.error_code = ifsr;
1784 + current->thread.trap_no = 0;
1785 + pax_report_refcount_overflow(regs);
1786 + fixup_exception(regs);
1787 + return;
1788 + }
1789 + }
1790 +#endif
1791 +
1792 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1793 return;
1794
1795 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1796 index 44b628e..623ee2a 100644
1797 --- a/arch/arm/mm/mmap.c
1798 +++ b/arch/arm/mm/mmap.c
1799 @@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1800 if (len > TASK_SIZE)
1801 return -ENOMEM;
1802
1803 +#ifdef CONFIG_PAX_RANDMMAP
1804 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1805 +#endif
1806 +
1807 if (addr) {
1808 if (do_align)
1809 addr = COLOUR_ALIGN(addr, pgoff);
1810 @@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1811 addr = PAGE_ALIGN(addr);
1812
1813 vma = find_vma(mm, addr);
1814 - if (TASK_SIZE - len >= addr &&
1815 - (!vma || addr + len <= vma->vm_start))
1816 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1817 return addr;
1818 }
1819 if (len > mm->cached_hole_size) {
1820 - start_addr = addr = mm->free_area_cache;
1821 + start_addr = addr = mm->free_area_cache;
1822 } else {
1823 - start_addr = addr = TASK_UNMAPPED_BASE;
1824 - mm->cached_hole_size = 0;
1825 + start_addr = addr = mm->mmap_base;
1826 + mm->cached_hole_size = 0;
1827 }
1828 /* 8 bits of randomness in 20 address space bits */
1829 if ((current->flags & PF_RANDOMIZE) &&
1830 @@ -89,14 +92,14 @@ full_search:
1831 * Start a new search - just in case we missed
1832 * some holes.
1833 */
1834 - if (start_addr != TASK_UNMAPPED_BASE) {
1835 - start_addr = addr = TASK_UNMAPPED_BASE;
1836 + if (start_addr != mm->mmap_base) {
1837 + start_addr = addr = mm->mmap_base;
1838 mm->cached_hole_size = 0;
1839 goto full_search;
1840 }
1841 return -ENOMEM;
1842 }
1843 - if (!vma || addr + len <= vma->vm_start) {
1844 + if (check_heap_stack_gap(vma, addr, len)) {
1845 /*
1846 * Remember the place where we stopped the search:
1847 */
1848 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1849 index 4c1a363..df311d0 100644
1850 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1851 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1852 @@ -41,7 +41,7 @@ struct samsung_dma_ops {
1853 int (*started)(unsigned ch);
1854 int (*flush)(unsigned ch);
1855 int (*stop)(unsigned ch);
1856 -};
1857 +} __no_const;
1858
1859 extern void *samsung_dmadev_get_ops(void);
1860 extern void *s3c_dma_get_ops(void);
1861 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1862 index 5f28cae..3d23723 100644
1863 --- a/arch/arm/plat-samsung/include/plat/ehci.h
1864 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
1865 @@ -14,7 +14,7 @@
1866 struct s5p_ehci_platdata {
1867 int (*phy_init)(struct platform_device *pdev, int type);
1868 int (*phy_exit)(struct platform_device *pdev, int type);
1869 -};
1870 +} __no_const;
1871
1872 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
1873
1874 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1875 index c3a58a1..78fbf54 100644
1876 --- a/arch/avr32/include/asm/cache.h
1877 +++ b/arch/avr32/include/asm/cache.h
1878 @@ -1,8 +1,10 @@
1879 #ifndef __ASM_AVR32_CACHE_H
1880 #define __ASM_AVR32_CACHE_H
1881
1882 +#include <linux/const.h>
1883 +
1884 #define L1_CACHE_SHIFT 5
1885 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1886 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1887
1888 /*
1889 * Memory returned by kmalloc() may be used for DMA, so we must make
1890 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1891 index 3b3159b..425ea94 100644
1892 --- a/arch/avr32/include/asm/elf.h
1893 +++ b/arch/avr32/include/asm/elf.h
1894 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1895 the loader. We need to make sure that it is out of the way of the program
1896 that it will "exec", and that there is sufficient room for the brk. */
1897
1898 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1899 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1900
1901 +#ifdef CONFIG_PAX_ASLR
1902 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1903 +
1904 +#define PAX_DELTA_MMAP_LEN 15
1905 +#define PAX_DELTA_STACK_LEN 15
1906 +#endif
1907
1908 /* This yields a mask that user programs can use to figure out what
1909 instruction set this CPU supports. This could be done in user space,
1910 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1911 index b7f5c68..556135c 100644
1912 --- a/arch/avr32/include/asm/kmap_types.h
1913 +++ b/arch/avr32/include/asm/kmap_types.h
1914 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1915 D(11) KM_IRQ1,
1916 D(12) KM_SOFTIRQ0,
1917 D(13) KM_SOFTIRQ1,
1918 -D(14) KM_TYPE_NR
1919 +D(14) KM_CLEARPAGE,
1920 +D(15) KM_TYPE_NR
1921 };
1922
1923 #undef D
1924 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1925 index f7040a1..db9f300 100644
1926 --- a/arch/avr32/mm/fault.c
1927 +++ b/arch/avr32/mm/fault.c
1928 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1929
1930 int exception_trace = 1;
1931
1932 +#ifdef CONFIG_PAX_PAGEEXEC
1933 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1934 +{
1935 + unsigned long i;
1936 +
1937 + printk(KERN_ERR "PAX: bytes at PC: ");
1938 + for (i = 0; i < 20; i++) {
1939 + unsigned char c;
1940 + if (get_user(c, (unsigned char *)pc+i))
1941 + printk(KERN_CONT "???????? ");
1942 + else
1943 + printk(KERN_CONT "%02x ", c);
1944 + }
1945 + printk("\n");
1946 +}
1947 +#endif
1948 +
1949 /*
1950 * This routine handles page faults. It determines the address and the
1951 * problem, and then passes it off to one of the appropriate routines.
1952 @@ -156,6 +173,16 @@ bad_area:
1953 up_read(&mm->mmap_sem);
1954
1955 if (user_mode(regs)) {
1956 +
1957 +#ifdef CONFIG_PAX_PAGEEXEC
1958 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1959 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1960 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1961 + do_group_exit(SIGKILL);
1962 + }
1963 + }
1964 +#endif
1965 +
1966 if (exception_trace && printk_ratelimit())
1967 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1968 "sp %08lx ecr %lu\n",
1969 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1970 index 568885a..f8008df 100644
1971 --- a/arch/blackfin/include/asm/cache.h
1972 +++ b/arch/blackfin/include/asm/cache.h
1973 @@ -7,6 +7,7 @@
1974 #ifndef __ARCH_BLACKFIN_CACHE_H
1975 #define __ARCH_BLACKFIN_CACHE_H
1976
1977 +#include <linux/const.h>
1978 #include <linux/linkage.h> /* for asmlinkage */
1979
1980 /*
1981 @@ -14,7 +15,7 @@
1982 * Blackfin loads 32 bytes for cache
1983 */
1984 #define L1_CACHE_SHIFT 5
1985 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1986 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1987 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1988
1989 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
1990 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
1991 index aea2718..3639a60 100644
1992 --- a/arch/cris/include/arch-v10/arch/cache.h
1993 +++ b/arch/cris/include/arch-v10/arch/cache.h
1994 @@ -1,8 +1,9 @@
1995 #ifndef _ASM_ARCH_CACHE_H
1996 #define _ASM_ARCH_CACHE_H
1997
1998 +#include <linux/const.h>
1999 /* Etrax 100LX have 32-byte cache-lines. */
2000 -#define L1_CACHE_BYTES 32
2001 #define L1_CACHE_SHIFT 5
2002 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2003
2004 #endif /* _ASM_ARCH_CACHE_H */
2005 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2006 index 1de779f..336fad3 100644
2007 --- a/arch/cris/include/arch-v32/arch/cache.h
2008 +++ b/arch/cris/include/arch-v32/arch/cache.h
2009 @@ -1,11 +1,12 @@
2010 #ifndef _ASM_CRIS_ARCH_CACHE_H
2011 #define _ASM_CRIS_ARCH_CACHE_H
2012
2013 +#include <linux/const.h>
2014 #include <arch/hwregs/dma.h>
2015
2016 /* A cache-line is 32 bytes. */
2017 -#define L1_CACHE_BYTES 32
2018 #define L1_CACHE_SHIFT 5
2019 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2020
2021 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2022
2023 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2024 index 0d8a7d6..d0c9ff5 100644
2025 --- a/arch/frv/include/asm/atomic.h
2026 +++ b/arch/frv/include/asm/atomic.h
2027 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2028 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2029 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2030
2031 +#define atomic64_read_unchecked(v) atomic64_read(v)
2032 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2033 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2034 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2035 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2036 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2037 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2038 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2039 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2040 +
2041 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2042 {
2043 int c, old;
2044 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2045 index 2797163..c2a401d 100644
2046 --- a/arch/frv/include/asm/cache.h
2047 +++ b/arch/frv/include/asm/cache.h
2048 @@ -12,10 +12,11 @@
2049 #ifndef __ASM_CACHE_H
2050 #define __ASM_CACHE_H
2051
2052 +#include <linux/const.h>
2053
2054 /* bytes per L1 cache line */
2055 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2056 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2057 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2058
2059 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2060 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2061 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2062 index f8e16b2..c73ff79 100644
2063 --- a/arch/frv/include/asm/kmap_types.h
2064 +++ b/arch/frv/include/asm/kmap_types.h
2065 @@ -23,6 +23,7 @@ enum km_type {
2066 KM_IRQ1,
2067 KM_SOFTIRQ0,
2068 KM_SOFTIRQ1,
2069 + KM_CLEARPAGE,
2070 KM_TYPE_NR
2071 };
2072
2073 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2074 index 385fd30..6c3d97e 100644
2075 --- a/arch/frv/mm/elf-fdpic.c
2076 +++ b/arch/frv/mm/elf-fdpic.c
2077 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2078 if (addr) {
2079 addr = PAGE_ALIGN(addr);
2080 vma = find_vma(current->mm, addr);
2081 - if (TASK_SIZE - len >= addr &&
2082 - (!vma || addr + len <= vma->vm_start))
2083 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2084 goto success;
2085 }
2086
2087 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2088 for (; vma; vma = vma->vm_next) {
2089 if (addr > limit)
2090 break;
2091 - if (addr + len <= vma->vm_start)
2092 + if (check_heap_stack_gap(vma, addr, len))
2093 goto success;
2094 addr = vma->vm_end;
2095 }
2096 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2097 for (; vma; vma = vma->vm_next) {
2098 if (addr > limit)
2099 break;
2100 - if (addr + len <= vma->vm_start)
2101 + if (check_heap_stack_gap(vma, addr, len))
2102 goto success;
2103 addr = vma->vm_end;
2104 }
2105 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2106 index c635028..6d9445a 100644
2107 --- a/arch/h8300/include/asm/cache.h
2108 +++ b/arch/h8300/include/asm/cache.h
2109 @@ -1,8 +1,10 @@
2110 #ifndef __ARCH_H8300_CACHE_H
2111 #define __ARCH_H8300_CACHE_H
2112
2113 +#include <linux/const.h>
2114 +
2115 /* bytes per L1 cache line */
2116 -#define L1_CACHE_BYTES 4
2117 +#define L1_CACHE_BYTES _AC(4,UL)
2118
2119 /* m68k-elf-gcc 2.95.2 doesn't like these */
2120
2121 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2122 index 0f01de2..d37d309 100644
2123 --- a/arch/hexagon/include/asm/cache.h
2124 +++ b/arch/hexagon/include/asm/cache.h
2125 @@ -21,9 +21,11 @@
2126 #ifndef __ASM_CACHE_H
2127 #define __ASM_CACHE_H
2128
2129 +#include <linux/const.h>
2130 +
2131 /* Bytes per L1 cache line */
2132 -#define L1_CACHE_SHIFT (5)
2133 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2134 +#define L1_CACHE_SHIFT 5
2135 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2136
2137 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2138 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2139 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2140 index 3fad89e..3047da5 100644
2141 --- a/arch/ia64/include/asm/atomic.h
2142 +++ b/arch/ia64/include/asm/atomic.h
2143 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2144 #define atomic64_inc(v) atomic64_add(1, (v))
2145 #define atomic64_dec(v) atomic64_sub(1, (v))
2146
2147 +#define atomic64_read_unchecked(v) atomic64_read(v)
2148 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2149 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2150 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2151 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2152 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2153 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2154 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2155 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2156 +
2157 /* Atomic operations are already serializing */
2158 #define smp_mb__before_atomic_dec() barrier()
2159 #define smp_mb__after_atomic_dec() barrier()
2160 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2161 index 988254a..e1ee885 100644
2162 --- a/arch/ia64/include/asm/cache.h
2163 +++ b/arch/ia64/include/asm/cache.h
2164 @@ -1,6 +1,7 @@
2165 #ifndef _ASM_IA64_CACHE_H
2166 #define _ASM_IA64_CACHE_H
2167
2168 +#include <linux/const.h>
2169
2170 /*
2171 * Copyright (C) 1998-2000 Hewlett-Packard Co
2172 @@ -9,7 +10,7 @@
2173
2174 /* Bytes per L1 (data) cache line. */
2175 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2176 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2177 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2178
2179 #ifdef CONFIG_SMP
2180 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2181 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2182 index b5298eb..67c6e62 100644
2183 --- a/arch/ia64/include/asm/elf.h
2184 +++ b/arch/ia64/include/asm/elf.h
2185 @@ -42,6 +42,13 @@
2186 */
2187 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2188
2189 +#ifdef CONFIG_PAX_ASLR
2190 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2191 +
2192 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2193 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2194 +#endif
2195 +
2196 #define PT_IA_64_UNWIND 0x70000001
2197
2198 /* IA-64 relocations: */
2199 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2200 index 1a97af3..7529d31 100644
2201 --- a/arch/ia64/include/asm/pgtable.h
2202 +++ b/arch/ia64/include/asm/pgtable.h
2203 @@ -12,7 +12,7 @@
2204 * David Mosberger-Tang <davidm@hpl.hp.com>
2205 */
2206
2207 -
2208 +#include <linux/const.h>
2209 #include <asm/mman.h>
2210 #include <asm/page.h>
2211 #include <asm/processor.h>
2212 @@ -143,6 +143,17 @@
2213 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2214 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2215 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2216 +
2217 +#ifdef CONFIG_PAX_PAGEEXEC
2218 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2219 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2220 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2221 +#else
2222 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2223 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2224 +# define PAGE_COPY_NOEXEC PAGE_COPY
2225 +#endif
2226 +
2227 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2228 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2229 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2230 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2231 index b77768d..e0795eb 100644
2232 --- a/arch/ia64/include/asm/spinlock.h
2233 +++ b/arch/ia64/include/asm/spinlock.h
2234 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2235 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2236
2237 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2238 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2239 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2240 }
2241
2242 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2243 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2244 index 449c8c0..432a3d2 100644
2245 --- a/arch/ia64/include/asm/uaccess.h
2246 +++ b/arch/ia64/include/asm/uaccess.h
2247 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2248 const void *__cu_from = (from); \
2249 long __cu_len = (n); \
2250 \
2251 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2252 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2253 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2254 __cu_len; \
2255 })
2256 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2257 long __cu_len = (n); \
2258 \
2259 __chk_user_ptr(__cu_from); \
2260 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2261 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2262 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2263 __cu_len; \
2264 })
2265 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2266 index 24603be..948052d 100644
2267 --- a/arch/ia64/kernel/module.c
2268 +++ b/arch/ia64/kernel/module.c
2269 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2270 void
2271 module_free (struct module *mod, void *module_region)
2272 {
2273 - if (mod && mod->arch.init_unw_table &&
2274 - module_region == mod->module_init) {
2275 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2276 unw_remove_unwind_table(mod->arch.init_unw_table);
2277 mod->arch.init_unw_table = NULL;
2278 }
2279 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2280 }
2281
2282 static inline int
2283 +in_init_rx (const struct module *mod, uint64_t addr)
2284 +{
2285 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2286 +}
2287 +
2288 +static inline int
2289 +in_init_rw (const struct module *mod, uint64_t addr)
2290 +{
2291 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2292 +}
2293 +
2294 +static inline int
2295 in_init (const struct module *mod, uint64_t addr)
2296 {
2297 - return addr - (uint64_t) mod->module_init < mod->init_size;
2298 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2299 +}
2300 +
2301 +static inline int
2302 +in_core_rx (const struct module *mod, uint64_t addr)
2303 +{
2304 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2305 +}
2306 +
2307 +static inline int
2308 +in_core_rw (const struct module *mod, uint64_t addr)
2309 +{
2310 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2311 }
2312
2313 static inline int
2314 in_core (const struct module *mod, uint64_t addr)
2315 {
2316 - return addr - (uint64_t) mod->module_core < mod->core_size;
2317 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2318 }
2319
2320 static inline int
2321 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2322 break;
2323
2324 case RV_BDREL:
2325 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2326 + if (in_init_rx(mod, val))
2327 + val -= (uint64_t) mod->module_init_rx;
2328 + else if (in_init_rw(mod, val))
2329 + val -= (uint64_t) mod->module_init_rw;
2330 + else if (in_core_rx(mod, val))
2331 + val -= (uint64_t) mod->module_core_rx;
2332 + else if (in_core_rw(mod, val))
2333 + val -= (uint64_t) mod->module_core_rw;
2334 break;
2335
2336 case RV_LTV:
2337 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2338 * addresses have been selected...
2339 */
2340 uint64_t gp;
2341 - if (mod->core_size > MAX_LTOFF)
2342 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2343 /*
2344 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2345 * at the end of the module.
2346 */
2347 - gp = mod->core_size - MAX_LTOFF / 2;
2348 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2349 else
2350 - gp = mod->core_size / 2;
2351 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2352 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2353 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2354 mod->arch.gp = gp;
2355 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2356 }
2357 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2358 index 609d500..7dde2a8 100644
2359 --- a/arch/ia64/kernel/sys_ia64.c
2360 +++ b/arch/ia64/kernel/sys_ia64.c
2361 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2362 if (REGION_NUMBER(addr) == RGN_HPAGE)
2363 addr = 0;
2364 #endif
2365 +
2366 +#ifdef CONFIG_PAX_RANDMMAP
2367 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2368 + addr = mm->free_area_cache;
2369 + else
2370 +#endif
2371 +
2372 if (!addr)
2373 addr = mm->free_area_cache;
2374
2375 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2376 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2377 /* At this point: (!vma || addr < vma->vm_end). */
2378 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2379 - if (start_addr != TASK_UNMAPPED_BASE) {
2380 + if (start_addr != mm->mmap_base) {
2381 /* Start a new search --- just in case we missed some holes. */
2382 - addr = TASK_UNMAPPED_BASE;
2383 + addr = mm->mmap_base;
2384 goto full_search;
2385 }
2386 return -ENOMEM;
2387 }
2388 - if (!vma || addr + len <= vma->vm_start) {
2389 + if (check_heap_stack_gap(vma, addr, len)) {
2390 /* Remember the address where we stopped this search: */
2391 mm->free_area_cache = addr + len;
2392 return addr;
2393 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2394 index 53c0ba0..2accdde 100644
2395 --- a/arch/ia64/kernel/vmlinux.lds.S
2396 +++ b/arch/ia64/kernel/vmlinux.lds.S
2397 @@ -199,7 +199,7 @@ SECTIONS {
2398 /* Per-cpu data: */
2399 . = ALIGN(PERCPU_PAGE_SIZE);
2400 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2401 - __phys_per_cpu_start = __per_cpu_load;
2402 + __phys_per_cpu_start = per_cpu_load;
2403 /*
2404 * ensure percpu data fits
2405 * into percpu page size
2406 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2407 index 20b3593..1ce77f0 100644
2408 --- a/arch/ia64/mm/fault.c
2409 +++ b/arch/ia64/mm/fault.c
2410 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2411 return pte_present(pte);
2412 }
2413
2414 +#ifdef CONFIG_PAX_PAGEEXEC
2415 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2416 +{
2417 + unsigned long i;
2418 +
2419 + printk(KERN_ERR "PAX: bytes at PC: ");
2420 + for (i = 0; i < 8; i++) {
2421 + unsigned int c;
2422 + if (get_user(c, (unsigned int *)pc+i))
2423 + printk(KERN_CONT "???????? ");
2424 + else
2425 + printk(KERN_CONT "%08x ", c);
2426 + }
2427 + printk("\n");
2428 +}
2429 +#endif
2430 +
2431 void __kprobes
2432 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2433 {
2434 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2435 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2436 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2437
2438 - if ((vma->vm_flags & mask) != mask)
2439 + if ((vma->vm_flags & mask) != mask) {
2440 +
2441 +#ifdef CONFIG_PAX_PAGEEXEC
2442 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2443 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2444 + goto bad_area;
2445 +
2446 + up_read(&mm->mmap_sem);
2447 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2448 + do_group_exit(SIGKILL);
2449 + }
2450 +#endif
2451 +
2452 goto bad_area;
2453
2454 + }
2455 +
2456 /*
2457 * If for any reason at all we couldn't handle the fault, make
2458 * sure we exit gracefully rather than endlessly redo the
2459 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2460 index 5ca674b..e0e1b70 100644
2461 --- a/arch/ia64/mm/hugetlbpage.c
2462 +++ b/arch/ia64/mm/hugetlbpage.c
2463 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2464 /* At this point: (!vmm || addr < vmm->vm_end). */
2465 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2466 return -ENOMEM;
2467 - if (!vmm || (addr + len) <= vmm->vm_start)
2468 + if (check_heap_stack_gap(vmm, addr, len))
2469 return addr;
2470 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2471 }
2472 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2473 index 00cb0e2..2ad8024 100644
2474 --- a/arch/ia64/mm/init.c
2475 +++ b/arch/ia64/mm/init.c
2476 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2477 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2478 vma->vm_end = vma->vm_start + PAGE_SIZE;
2479 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2480 +
2481 +#ifdef CONFIG_PAX_PAGEEXEC
2482 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2483 + vma->vm_flags &= ~VM_EXEC;
2484 +
2485 +#ifdef CONFIG_PAX_MPROTECT
2486 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2487 + vma->vm_flags &= ~VM_MAYEXEC;
2488 +#endif
2489 +
2490 + }
2491 +#endif
2492 +
2493 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2494 down_write(&current->mm->mmap_sem);
2495 if (insert_vm_struct(current->mm, vma)) {
2496 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2497 index 40b3ee9..8c2c112 100644
2498 --- a/arch/m32r/include/asm/cache.h
2499 +++ b/arch/m32r/include/asm/cache.h
2500 @@ -1,8 +1,10 @@
2501 #ifndef _ASM_M32R_CACHE_H
2502 #define _ASM_M32R_CACHE_H
2503
2504 +#include <linux/const.h>
2505 +
2506 /* L1 cache line size */
2507 #define L1_CACHE_SHIFT 4
2508 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2509 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2510
2511 #endif /* _ASM_M32R_CACHE_H */
2512 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2513 index 82abd15..d95ae5d 100644
2514 --- a/arch/m32r/lib/usercopy.c
2515 +++ b/arch/m32r/lib/usercopy.c
2516 @@ -14,6 +14,9 @@
2517 unsigned long
2518 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2519 {
2520 + if ((long)n < 0)
2521 + return n;
2522 +
2523 prefetch(from);
2524 if (access_ok(VERIFY_WRITE, to, n))
2525 __copy_user(to,from,n);
2526 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2527 unsigned long
2528 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2529 {
2530 + if ((long)n < 0)
2531 + return n;
2532 +
2533 prefetchw(to);
2534 if (access_ok(VERIFY_READ, from, n))
2535 __copy_user_zeroing(to,from,n);
2536 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2537 index 0395c51..5f26031 100644
2538 --- a/arch/m68k/include/asm/cache.h
2539 +++ b/arch/m68k/include/asm/cache.h
2540 @@ -4,9 +4,11 @@
2541 #ifndef __ARCH_M68K_CACHE_H
2542 #define __ARCH_M68K_CACHE_H
2543
2544 +#include <linux/const.h>
2545 +
2546 /* bytes per L1 cache line */
2547 #define L1_CACHE_SHIFT 4
2548 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2549 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2550
2551 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2552
2553 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2554 index 4efe96a..60e8699 100644
2555 --- a/arch/microblaze/include/asm/cache.h
2556 +++ b/arch/microblaze/include/asm/cache.h
2557 @@ -13,11 +13,12 @@
2558 #ifndef _ASM_MICROBLAZE_CACHE_H
2559 #define _ASM_MICROBLAZE_CACHE_H
2560
2561 +#include <linux/const.h>
2562 #include <asm/registers.h>
2563
2564 #define L1_CACHE_SHIFT 5
2565 /* word-granular cache in microblaze */
2566 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2567 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2568
2569 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2570
2571 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2572 index 1d93f81..67794d0 100644
2573 --- a/arch/mips/include/asm/atomic.h
2574 +++ b/arch/mips/include/asm/atomic.h
2575 @@ -21,6 +21,10 @@
2576 #include <asm/war.h>
2577 #include <asm/system.h>
2578
2579 +#ifdef CONFIG_GENERIC_ATOMIC64
2580 +#include <asm-generic/atomic64.h>
2581 +#endif
2582 +
2583 #define ATOMIC_INIT(i) { (i) }
2584
2585 /*
2586 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2587 */
2588 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2589
2590 +#define atomic64_read_unchecked(v) atomic64_read(v)
2591 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2592 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2593 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2594 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2595 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2596 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2597 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2598 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2599 +
2600 #endif /* CONFIG_64BIT */
2601
2602 /*
2603 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2604 index b4db69f..8f3b093 100644
2605 --- a/arch/mips/include/asm/cache.h
2606 +++ b/arch/mips/include/asm/cache.h
2607 @@ -9,10 +9,11 @@
2608 #ifndef _ASM_CACHE_H
2609 #define _ASM_CACHE_H
2610
2611 +#include <linux/const.h>
2612 #include <kmalloc.h>
2613
2614 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2615 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2616 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2617
2618 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2619 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2620 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2621 index 455c0ac..ad65fbe 100644
2622 --- a/arch/mips/include/asm/elf.h
2623 +++ b/arch/mips/include/asm/elf.h
2624 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2625 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2626 #endif
2627
2628 +#ifdef CONFIG_PAX_ASLR
2629 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2630 +
2631 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2632 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2633 +#endif
2634 +
2635 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2636 struct linux_binprm;
2637 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2638 int uses_interp);
2639
2640 -struct mm_struct;
2641 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2642 -#define arch_randomize_brk arch_randomize_brk
2643 -
2644 #endif /* _ASM_ELF_H */
2645 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2646 index e59cd1a..8e329d6 100644
2647 --- a/arch/mips/include/asm/page.h
2648 +++ b/arch/mips/include/asm/page.h
2649 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2650 #ifdef CONFIG_CPU_MIPS32
2651 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2652 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2653 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2654 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2655 #else
2656 typedef struct { unsigned long long pte; } pte_t;
2657 #define pte_val(x) ((x).pte)
2658 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2659 index 6018c80..7c37203 100644
2660 --- a/arch/mips/include/asm/system.h
2661 +++ b/arch/mips/include/asm/system.h
2662 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2663 */
2664 #define __ARCH_WANT_UNLOCKED_CTXSW
2665
2666 -extern unsigned long arch_align_stack(unsigned long sp);
2667 +#define arch_align_stack(x) ((x) & ~0xfUL)
2668
2669 #endif /* _ASM_SYSTEM_H */
2670 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2671 index 9fdd8bc..4bd7f1a 100644
2672 --- a/arch/mips/kernel/binfmt_elfn32.c
2673 +++ b/arch/mips/kernel/binfmt_elfn32.c
2674 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2675 #undef ELF_ET_DYN_BASE
2676 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2677
2678 +#ifdef CONFIG_PAX_ASLR
2679 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2680 +
2681 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2682 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2683 +#endif
2684 +
2685 #include <asm/processor.h>
2686 #include <linux/module.h>
2687 #include <linux/elfcore.h>
2688 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2689 index ff44823..97f8906 100644
2690 --- a/arch/mips/kernel/binfmt_elfo32.c
2691 +++ b/arch/mips/kernel/binfmt_elfo32.c
2692 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2693 #undef ELF_ET_DYN_BASE
2694 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2695
2696 +#ifdef CONFIG_PAX_ASLR
2697 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2698 +
2699 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2700 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2701 +#endif
2702 +
2703 #include <asm/processor.h>
2704
2705 /*
2706 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2707 index c47f96e..661d418 100644
2708 --- a/arch/mips/kernel/process.c
2709 +++ b/arch/mips/kernel/process.c
2710 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
2711 out:
2712 return pc;
2713 }
2714 -
2715 -/*
2716 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2717 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2718 - */
2719 -unsigned long arch_align_stack(unsigned long sp)
2720 -{
2721 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2722 - sp -= get_random_int() & ~PAGE_MASK;
2723 -
2724 - return sp & ALMASK;
2725 -}
2726 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2727 index 937cf33..adb39bb 100644
2728 --- a/arch/mips/mm/fault.c
2729 +++ b/arch/mips/mm/fault.c
2730 @@ -28,6 +28,23 @@
2731 #include <asm/highmem.h> /* For VMALLOC_END */
2732 #include <linux/kdebug.h>
2733
2734 +#ifdef CONFIG_PAX_PAGEEXEC
2735 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2736 +{
2737 + unsigned long i;
2738 +
2739 + printk(KERN_ERR "PAX: bytes at PC: ");
2740 + for (i = 0; i < 5; i++) {
2741 + unsigned int c;
2742 + if (get_user(c, (unsigned int *)pc+i))
2743 + printk(KERN_CONT "???????? ");
2744 + else
2745 + printk(KERN_CONT "%08x ", c);
2746 + }
2747 + printk("\n");
2748 +}
2749 +#endif
2750 +
2751 /*
2752 * This routine handles page faults. It determines the address,
2753 * and the problem, and then passes it off to one of the appropriate
2754 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
2755 index 302d779..7d35bf8 100644
2756 --- a/arch/mips/mm/mmap.c
2757 +++ b/arch/mips/mm/mmap.c
2758 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2759 do_color_align = 1;
2760
2761 /* requesting a specific address */
2762 +
2763 +#ifdef CONFIG_PAX_RANDMMAP
2764 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2765 +#endif
2766 +
2767 if (addr) {
2768 if (do_color_align)
2769 addr = COLOUR_ALIGN(addr, pgoff);
2770 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2771 addr = PAGE_ALIGN(addr);
2772
2773 vma = find_vma(mm, addr);
2774 - if (TASK_SIZE - len >= addr &&
2775 - (!vma || addr + len <= vma->vm_start))
2776 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2777 return addr;
2778 }
2779
2780 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2781 /* At this point: (!vma || addr < vma->vm_end). */
2782 if (TASK_SIZE - len < addr)
2783 return -ENOMEM;
2784 - if (!vma || addr + len <= vma->vm_start)
2785 + if (check_heap_stack_gap(vma, addr, len))
2786 return addr;
2787 addr = vma->vm_end;
2788 if (do_color_align)
2789 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2790 /* make sure it can fit in the remaining address space */
2791 if (likely(addr > len)) {
2792 vma = find_vma(mm, addr - len);
2793 - if (!vma || addr <= vma->vm_start) {
2794 + if (check_heap_stack_gap(vma, addr - len, len)) {
2795 /* cache the address as a hint for next time */
2796 return mm->free_area_cache = addr - len;
2797 }
2798 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2799 * return with success:
2800 */
2801 vma = find_vma(mm, addr);
2802 - if (likely(!vma || addr + len <= vma->vm_start)) {
2803 + if (check_heap_stack_gap(vma, addr, len)) {
2804 /* cache the address as a hint for next time */
2805 return mm->free_area_cache = addr;
2806 }
2807 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2808 mm->unmap_area = arch_unmap_area_topdown;
2809 }
2810 }
2811 -
2812 -static inline unsigned long brk_rnd(void)
2813 -{
2814 - unsigned long rnd = get_random_int();
2815 -
2816 - rnd = rnd << PAGE_SHIFT;
2817 - /* 8MB for 32bit, 256MB for 64bit */
2818 - if (TASK_IS_32BIT_ADDR)
2819 - rnd = rnd & 0x7ffffful;
2820 - else
2821 - rnd = rnd & 0xffffffful;
2822 -
2823 - return rnd;
2824 -}
2825 -
2826 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2827 -{
2828 - unsigned long base = mm->brk;
2829 - unsigned long ret;
2830 -
2831 - ret = PAGE_ALIGN(base + brk_rnd());
2832 -
2833 - if (ret < mm->brk)
2834 - return mm->brk;
2835 -
2836 - return ret;
2837 -}
2838 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2839 index 967d144..db12197 100644
2840 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2841 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2842 @@ -11,12 +11,14 @@
2843 #ifndef _ASM_PROC_CACHE_H
2844 #define _ASM_PROC_CACHE_H
2845
2846 +#include <linux/const.h>
2847 +
2848 /* L1 cache */
2849
2850 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2851 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2852 -#define L1_CACHE_BYTES 16 /* bytes per entry */
2853 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2854 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2855 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2856
2857 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2858 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2859 index bcb5df2..84fabd2 100644
2860 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2861 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2862 @@ -16,13 +16,15 @@
2863 #ifndef _ASM_PROC_CACHE_H
2864 #define _ASM_PROC_CACHE_H
2865
2866 +#include <linux/const.h>
2867 +
2868 /*
2869 * L1 cache
2870 */
2871 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2872 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
2873 -#define L1_CACHE_BYTES 32 /* bytes per entry */
2874 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
2875 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2876 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
2877
2878 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2879 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
2880 index 4ce7a01..449202a 100644
2881 --- a/arch/openrisc/include/asm/cache.h
2882 +++ b/arch/openrisc/include/asm/cache.h
2883 @@ -19,11 +19,13 @@
2884 #ifndef __ASM_OPENRISC_CACHE_H
2885 #define __ASM_OPENRISC_CACHE_H
2886
2887 +#include <linux/const.h>
2888 +
2889 /* FIXME: How can we replace these with values from the CPU...
2890 * they shouldn't be hard-coded!
2891 */
2892
2893 -#define L1_CACHE_BYTES 16
2894 #define L1_CACHE_SHIFT 4
2895 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2896
2897 #endif /* __ASM_OPENRISC_CACHE_H */
2898 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2899 index 4054b31..a10c105 100644
2900 --- a/arch/parisc/include/asm/atomic.h
2901 +++ b/arch/parisc/include/asm/atomic.h
2902 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2903
2904 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2905
2906 +#define atomic64_read_unchecked(v) atomic64_read(v)
2907 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2908 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2909 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2910 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2911 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2912 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2913 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2914 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2915 +
2916 #endif /* !CONFIG_64BIT */
2917
2918
2919 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
2920 index 47f11c7..3420df2 100644
2921 --- a/arch/parisc/include/asm/cache.h
2922 +++ b/arch/parisc/include/asm/cache.h
2923 @@ -5,6 +5,7 @@
2924 #ifndef __ARCH_PARISC_CACHE_H
2925 #define __ARCH_PARISC_CACHE_H
2926
2927 +#include <linux/const.h>
2928
2929 /*
2930 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
2931 @@ -15,13 +16,13 @@
2932 * just ruin performance.
2933 */
2934 #ifdef CONFIG_PA20
2935 -#define L1_CACHE_BYTES 64
2936 #define L1_CACHE_SHIFT 6
2937 #else
2938 -#define L1_CACHE_BYTES 32
2939 #define L1_CACHE_SHIFT 5
2940 #endif
2941
2942 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2943 +
2944 #ifndef __ASSEMBLY__
2945
2946 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2947 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2948 index 19f6cb1..6c78cf2 100644
2949 --- a/arch/parisc/include/asm/elf.h
2950 +++ b/arch/parisc/include/asm/elf.h
2951 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
2952
2953 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2954
2955 +#ifdef CONFIG_PAX_ASLR
2956 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
2957 +
2958 +#define PAX_DELTA_MMAP_LEN 16
2959 +#define PAX_DELTA_STACK_LEN 16
2960 +#endif
2961 +
2962 /* This yields a mask that user programs can use to figure out what
2963 instruction set this CPU supports. This could be done in user space,
2964 but it's not easy, and we've already done it here. */
2965 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2966 index 22dadeb..f6c2be4 100644
2967 --- a/arch/parisc/include/asm/pgtable.h
2968 +++ b/arch/parisc/include/asm/pgtable.h
2969 @@ -210,6 +210,17 @@ struct vm_area_struct;
2970 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2971 #define PAGE_COPY PAGE_EXECREAD
2972 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2973 +
2974 +#ifdef CONFIG_PAX_PAGEEXEC
2975 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2976 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2977 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2978 +#else
2979 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2980 +# define PAGE_COPY_NOEXEC PAGE_COPY
2981 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2982 +#endif
2983 +
2984 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2985 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
2986 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
2987 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2988 index 5e34ccf..672bc9c 100644
2989 --- a/arch/parisc/kernel/module.c
2990 +++ b/arch/parisc/kernel/module.c
2991 @@ -98,16 +98,38 @@
2992
2993 /* three functions to determine where in the module core
2994 * or init pieces the location is */
2995 +static inline int in_init_rx(struct module *me, void *loc)
2996 +{
2997 + return (loc >= me->module_init_rx &&
2998 + loc < (me->module_init_rx + me->init_size_rx));
2999 +}
3000 +
3001 +static inline int in_init_rw(struct module *me, void *loc)
3002 +{
3003 + return (loc >= me->module_init_rw &&
3004 + loc < (me->module_init_rw + me->init_size_rw));
3005 +}
3006 +
3007 static inline int in_init(struct module *me, void *loc)
3008 {
3009 - return (loc >= me->module_init &&
3010 - loc <= (me->module_init + me->init_size));
3011 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3012 +}
3013 +
3014 +static inline int in_core_rx(struct module *me, void *loc)
3015 +{
3016 + return (loc >= me->module_core_rx &&
3017 + loc < (me->module_core_rx + me->core_size_rx));
3018 +}
3019 +
3020 +static inline int in_core_rw(struct module *me, void *loc)
3021 +{
3022 + return (loc >= me->module_core_rw &&
3023 + loc < (me->module_core_rw + me->core_size_rw));
3024 }
3025
3026 static inline int in_core(struct module *me, void *loc)
3027 {
3028 - return (loc >= me->module_core &&
3029 - loc <= (me->module_core + me->core_size));
3030 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3031 }
3032
3033 static inline int in_local(struct module *me, void *loc)
3034 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3035 }
3036
3037 /* align things a bit */
3038 - me->core_size = ALIGN(me->core_size, 16);
3039 - me->arch.got_offset = me->core_size;
3040 - me->core_size += gots * sizeof(struct got_entry);
3041 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3042 + me->arch.got_offset = me->core_size_rw;
3043 + me->core_size_rw += gots * sizeof(struct got_entry);
3044
3045 - me->core_size = ALIGN(me->core_size, 16);
3046 - me->arch.fdesc_offset = me->core_size;
3047 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3048 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3049 + me->arch.fdesc_offset = me->core_size_rw;
3050 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3051
3052 me->arch.got_max = gots;
3053 me->arch.fdesc_max = fdescs;
3054 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3055
3056 BUG_ON(value == 0);
3057
3058 - got = me->module_core + me->arch.got_offset;
3059 + got = me->module_core_rw + me->arch.got_offset;
3060 for (i = 0; got[i].addr; i++)
3061 if (got[i].addr == value)
3062 goto out;
3063 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3064 #ifdef CONFIG_64BIT
3065 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3066 {
3067 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3068 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3069
3070 if (!value) {
3071 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3072 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3073
3074 /* Create new one */
3075 fdesc->addr = value;
3076 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3077 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3078 return (Elf_Addr)fdesc;
3079 }
3080 #endif /* CONFIG_64BIT */
3081 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3082
3083 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3084 end = table + sechdrs[me->arch.unwind_section].sh_size;
3085 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3086 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3087
3088 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3089 me->arch.unwind_section, table, end, gp);
3090 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3091 index c9b9322..02d8940 100644
3092 --- a/arch/parisc/kernel/sys_parisc.c
3093 +++ b/arch/parisc/kernel/sys_parisc.c
3094 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3095 /* At this point: (!vma || addr < vma->vm_end). */
3096 if (TASK_SIZE - len < addr)
3097 return -ENOMEM;
3098 - if (!vma || addr + len <= vma->vm_start)
3099 + if (check_heap_stack_gap(vma, addr, len))
3100 return addr;
3101 addr = vma->vm_end;
3102 }
3103 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3104 /* At this point: (!vma || addr < vma->vm_end). */
3105 if (TASK_SIZE - len < addr)
3106 return -ENOMEM;
3107 - if (!vma || addr + len <= vma->vm_start)
3108 + if (check_heap_stack_gap(vma, addr, len))
3109 return addr;
3110 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3111 if (addr < vma->vm_end) /* handle wraparound */
3112 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3113 if (flags & MAP_FIXED)
3114 return addr;
3115 if (!addr)
3116 - addr = TASK_UNMAPPED_BASE;
3117 + addr = current->mm->mmap_base;
3118
3119 if (filp) {
3120 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3121 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3122 index f19e660..414fe24 100644
3123 --- a/arch/parisc/kernel/traps.c
3124 +++ b/arch/parisc/kernel/traps.c
3125 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3126
3127 down_read(&current->mm->mmap_sem);
3128 vma = find_vma(current->mm,regs->iaoq[0]);
3129 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3130 - && (vma->vm_flags & VM_EXEC)) {
3131 -
3132 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3133 fault_address = regs->iaoq[0];
3134 fault_space = regs->iasq[0];
3135
3136 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3137 index 18162ce..94de376 100644
3138 --- a/arch/parisc/mm/fault.c
3139 +++ b/arch/parisc/mm/fault.c
3140 @@ -15,6 +15,7 @@
3141 #include <linux/sched.h>
3142 #include <linux/interrupt.h>
3143 #include <linux/module.h>
3144 +#include <linux/unistd.h>
3145
3146 #include <asm/uaccess.h>
3147 #include <asm/traps.h>
3148 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3149 static unsigned long
3150 parisc_acctyp(unsigned long code, unsigned int inst)
3151 {
3152 - if (code == 6 || code == 16)
3153 + if (code == 6 || code == 7 || code == 16)
3154 return VM_EXEC;
3155
3156 switch (inst & 0xf0000000) {
3157 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3158 }
3159 #endif
3160
3161 +#ifdef CONFIG_PAX_PAGEEXEC
3162 +/*
3163 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3164 + *
3165 + * returns 1 when task should be killed
3166 + * 2 when rt_sigreturn trampoline was detected
3167 + * 3 when unpatched PLT trampoline was detected
3168 + */
3169 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3170 +{
3171 +
3172 +#ifdef CONFIG_PAX_EMUPLT
3173 + int err;
3174 +
3175 + do { /* PaX: unpatched PLT emulation */
3176 + unsigned int bl, depwi;
3177 +
3178 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3179 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3180 +
3181 + if (err)
3182 + break;
3183 +
3184 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3185 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3186 +
3187 + err = get_user(ldw, (unsigned int *)addr);
3188 + err |= get_user(bv, (unsigned int *)(addr+4));
3189 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3190 +
3191 + if (err)
3192 + break;
3193 +
3194 + if (ldw == 0x0E801096U &&
3195 + bv == 0xEAC0C000U &&
3196 + ldw2 == 0x0E881095U)
3197 + {
3198 + unsigned int resolver, map;
3199 +
3200 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3201 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3202 + if (err)
3203 + break;
3204 +
3205 + regs->gr[20] = instruction_pointer(regs)+8;
3206 + regs->gr[21] = map;
3207 + regs->gr[22] = resolver;
3208 + regs->iaoq[0] = resolver | 3UL;
3209 + regs->iaoq[1] = regs->iaoq[0] + 4;
3210 + return 3;
3211 + }
3212 + }
3213 + } while (0);
3214 +#endif
3215 +
3216 +#ifdef CONFIG_PAX_EMUTRAMP
3217 +
3218 +#ifndef CONFIG_PAX_EMUSIGRT
3219 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3220 + return 1;
3221 +#endif
3222 +
3223 + do { /* PaX: rt_sigreturn emulation */
3224 + unsigned int ldi1, ldi2, bel, nop;
3225 +
3226 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3227 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3228 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3229 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3230 +
3231 + if (err)
3232 + break;
3233 +
3234 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3235 + ldi2 == 0x3414015AU &&
3236 + bel == 0xE4008200U &&
3237 + nop == 0x08000240U)
3238 + {
3239 + regs->gr[25] = (ldi1 & 2) >> 1;
3240 + regs->gr[20] = __NR_rt_sigreturn;
3241 + regs->gr[31] = regs->iaoq[1] + 16;
3242 + regs->sr[0] = regs->iasq[1];
3243 + regs->iaoq[0] = 0x100UL;
3244 + regs->iaoq[1] = regs->iaoq[0] + 4;
3245 + regs->iasq[0] = regs->sr[2];
3246 + regs->iasq[1] = regs->sr[2];
3247 + return 2;
3248 + }
3249 + } while (0);
3250 +#endif
3251 +
3252 + return 1;
3253 +}
3254 +
3255 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3256 +{
3257 + unsigned long i;
3258 +
3259 + printk(KERN_ERR "PAX: bytes at PC: ");
3260 + for (i = 0; i < 5; i++) {
3261 + unsigned int c;
3262 + if (get_user(c, (unsigned int *)pc+i))
3263 + printk(KERN_CONT "???????? ");
3264 + else
3265 + printk(KERN_CONT "%08x ", c);
3266 + }
3267 + printk("\n");
3268 +}
3269 +#endif
3270 +
3271 int fixup_exception(struct pt_regs *regs)
3272 {
3273 const struct exception_table_entry *fix;
3274 @@ -192,8 +303,33 @@ good_area:
3275
3276 acc_type = parisc_acctyp(code,regs->iir);
3277
3278 - if ((vma->vm_flags & acc_type) != acc_type)
3279 + if ((vma->vm_flags & acc_type) != acc_type) {
3280 +
3281 +#ifdef CONFIG_PAX_PAGEEXEC
3282 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3283 + (address & ~3UL) == instruction_pointer(regs))
3284 + {
3285 + up_read(&mm->mmap_sem);
3286 + switch (pax_handle_fetch_fault(regs)) {
3287 +
3288 +#ifdef CONFIG_PAX_EMUPLT
3289 + case 3:
3290 + return;
3291 +#endif
3292 +
3293 +#ifdef CONFIG_PAX_EMUTRAMP
3294 + case 2:
3295 + return;
3296 +#endif
3297 +
3298 + }
3299 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3300 + do_group_exit(SIGKILL);
3301 + }
3302 +#endif
3303 +
3304 goto bad_area;
3305 + }
3306
3307 /*
3308 * If for any reason at all we couldn't handle the fault, make
3309 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3310 index 02e41b5..ec6e26c 100644
3311 --- a/arch/powerpc/include/asm/atomic.h
3312 +++ b/arch/powerpc/include/asm/atomic.h
3313 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3314
3315 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3316
3317 +#define atomic64_read_unchecked(v) atomic64_read(v)
3318 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3319 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3320 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3321 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3322 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3323 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3324 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3325 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3326 +
3327 #endif /* __powerpc64__ */
3328
3329 #endif /* __KERNEL__ */
3330 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3331 index 4b50941..5605819 100644
3332 --- a/arch/powerpc/include/asm/cache.h
3333 +++ b/arch/powerpc/include/asm/cache.h
3334 @@ -3,6 +3,7 @@
3335
3336 #ifdef __KERNEL__
3337
3338 +#include <linux/const.h>
3339
3340 /* bytes per L1 cache line */
3341 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3342 @@ -22,7 +23,7 @@
3343 #define L1_CACHE_SHIFT 7
3344 #endif
3345
3346 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3347 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3348
3349 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3350
3351 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3352 index 3bf9cca..e7457d0 100644
3353 --- a/arch/powerpc/include/asm/elf.h
3354 +++ b/arch/powerpc/include/asm/elf.h
3355 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3356 the loader. We need to make sure that it is out of the way of the program
3357 that it will "exec", and that there is sufficient room for the brk. */
3358
3359 -extern unsigned long randomize_et_dyn(unsigned long base);
3360 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3361 +#define ELF_ET_DYN_BASE (0x20000000)
3362 +
3363 +#ifdef CONFIG_PAX_ASLR
3364 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3365 +
3366 +#ifdef __powerpc64__
3367 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3368 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3369 +#else
3370 +#define PAX_DELTA_MMAP_LEN 15
3371 +#define PAX_DELTA_STACK_LEN 15
3372 +#endif
3373 +#endif
3374
3375 /*
3376 * Our registers are always unsigned longs, whether we're a 32 bit
3377 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3378 (0x7ff >> (PAGE_SHIFT - 12)) : \
3379 (0x3ffff >> (PAGE_SHIFT - 12)))
3380
3381 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3382 -#define arch_randomize_brk arch_randomize_brk
3383 -
3384 #endif /* __KERNEL__ */
3385
3386 /*
3387 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3388 index bca8fdc..61e9580 100644
3389 --- a/arch/powerpc/include/asm/kmap_types.h
3390 +++ b/arch/powerpc/include/asm/kmap_types.h
3391 @@ -27,6 +27,7 @@ enum km_type {
3392 KM_PPC_SYNC_PAGE,
3393 KM_PPC_SYNC_ICACHE,
3394 KM_KDB,
3395 + KM_CLEARPAGE,
3396 KM_TYPE_NR
3397 };
3398
3399 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3400 index d4a7f64..451de1c 100644
3401 --- a/arch/powerpc/include/asm/mman.h
3402 +++ b/arch/powerpc/include/asm/mman.h
3403 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3404 }
3405 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3406
3407 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3408 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3409 {
3410 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3411 }
3412 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3413 index dd9c4fd..a2ced87 100644
3414 --- a/arch/powerpc/include/asm/page.h
3415 +++ b/arch/powerpc/include/asm/page.h
3416 @@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
3417 * and needs to be executable. This means the whole heap ends
3418 * up being executable.
3419 */
3420 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3421 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3422 +#define VM_DATA_DEFAULT_FLAGS32 \
3423 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3424 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3425
3426 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3427 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3428 @@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
3429 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3430 #endif
3431
3432 +#define ktla_ktva(addr) (addr)
3433 +#define ktva_ktla(addr) (addr)
3434 +
3435 /*
3436 * Use the top bit of the higher-level page table entries to indicate whether
3437 * the entries we point to contain hugepages. This works because we know that
3438 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3439 index fb40ede..d3ce956 100644
3440 --- a/arch/powerpc/include/asm/page_64.h
3441 +++ b/arch/powerpc/include/asm/page_64.h
3442 @@ -144,15 +144,18 @@ do { \
3443 * stack by default, so in the absence of a PT_GNU_STACK program header
3444 * we turn execute permission off.
3445 */
3446 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3447 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3448 +#define VM_STACK_DEFAULT_FLAGS32 \
3449 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3450 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3451
3452 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3453 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3454
3455 +#ifndef CONFIG_PAX_PAGEEXEC
3456 #define VM_STACK_DEFAULT_FLAGS \
3457 (is_32bit_task() ? \
3458 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3459 +#endif
3460
3461 #include <asm-generic/getorder.h>
3462
3463 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3464 index 88b0bd9..e32bc67 100644
3465 --- a/arch/powerpc/include/asm/pgtable.h
3466 +++ b/arch/powerpc/include/asm/pgtable.h
3467 @@ -2,6 +2,7 @@
3468 #define _ASM_POWERPC_PGTABLE_H
3469 #ifdef __KERNEL__
3470
3471 +#include <linux/const.h>
3472 #ifndef __ASSEMBLY__
3473 #include <asm/processor.h> /* For TASK_SIZE */
3474 #include <asm/mmu.h>
3475 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3476 index 4aad413..85d86bf 100644
3477 --- a/arch/powerpc/include/asm/pte-hash32.h
3478 +++ b/arch/powerpc/include/asm/pte-hash32.h
3479 @@ -21,6 +21,7 @@
3480 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3481 #define _PAGE_USER 0x004 /* usermode access allowed */
3482 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3483 +#define _PAGE_EXEC _PAGE_GUARDED
3484 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3485 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3486 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3487 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3488 index 559da19..7e5835c 100644
3489 --- a/arch/powerpc/include/asm/reg.h
3490 +++ b/arch/powerpc/include/asm/reg.h
3491 @@ -212,6 +212,7 @@
3492 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3493 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3494 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3495 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3496 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3497 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3498 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3499 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3500 index e30a13d..2b7d994 100644
3501 --- a/arch/powerpc/include/asm/system.h
3502 +++ b/arch/powerpc/include/asm/system.h
3503 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3504 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3505 #endif
3506
3507 -extern unsigned long arch_align_stack(unsigned long sp);
3508 +#define arch_align_stack(x) ((x) & ~0xfUL)
3509
3510 /* Used in very early kernel initialization. */
3511 extern unsigned long reloc_offset(void);
3512 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3513 index bd0fb84..a42a14b 100644
3514 --- a/arch/powerpc/include/asm/uaccess.h
3515 +++ b/arch/powerpc/include/asm/uaccess.h
3516 @@ -13,6 +13,8 @@
3517 #define VERIFY_READ 0
3518 #define VERIFY_WRITE 1
3519
3520 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3521 +
3522 /*
3523 * The fs value determines whether argument validity checking should be
3524 * performed or not. If get_fs() == USER_DS, checking is performed, with
3525 @@ -327,52 +329,6 @@ do { \
3526 extern unsigned long __copy_tofrom_user(void __user *to,
3527 const void __user *from, unsigned long size);
3528
3529 -#ifndef __powerpc64__
3530 -
3531 -static inline unsigned long copy_from_user(void *to,
3532 - const void __user *from, unsigned long n)
3533 -{
3534 - unsigned long over;
3535 -
3536 - if (access_ok(VERIFY_READ, from, n))
3537 - return __copy_tofrom_user((__force void __user *)to, from, n);
3538 - if ((unsigned long)from < TASK_SIZE) {
3539 - over = (unsigned long)from + n - TASK_SIZE;
3540 - return __copy_tofrom_user((__force void __user *)to, from,
3541 - n - over) + over;
3542 - }
3543 - return n;
3544 -}
3545 -
3546 -static inline unsigned long copy_to_user(void __user *to,
3547 - const void *from, unsigned long n)
3548 -{
3549 - unsigned long over;
3550 -
3551 - if (access_ok(VERIFY_WRITE, to, n))
3552 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3553 - if ((unsigned long)to < TASK_SIZE) {
3554 - over = (unsigned long)to + n - TASK_SIZE;
3555 - return __copy_tofrom_user(to, (__force void __user *)from,
3556 - n - over) + over;
3557 - }
3558 - return n;
3559 -}
3560 -
3561 -#else /* __powerpc64__ */
3562 -
3563 -#define __copy_in_user(to, from, size) \
3564 - __copy_tofrom_user((to), (from), (size))
3565 -
3566 -extern unsigned long copy_from_user(void *to, const void __user *from,
3567 - unsigned long n);
3568 -extern unsigned long copy_to_user(void __user *to, const void *from,
3569 - unsigned long n);
3570 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3571 - unsigned long n);
3572 -
3573 -#endif /* __powerpc64__ */
3574 -
3575 static inline unsigned long __copy_from_user_inatomic(void *to,
3576 const void __user *from, unsigned long n)
3577 {
3578 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3579 if (ret == 0)
3580 return 0;
3581 }
3582 +
3583 + if (!__builtin_constant_p(n))
3584 + check_object_size(to, n, false);
3585 +
3586 return __copy_tofrom_user((__force void __user *)to, from, n);
3587 }
3588
3589 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3590 if (ret == 0)
3591 return 0;
3592 }
3593 +
3594 + if (!__builtin_constant_p(n))
3595 + check_object_size(from, n, true);
3596 +
3597 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3598 }
3599
3600 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3601 return __copy_to_user_inatomic(to, from, size);
3602 }
3603
3604 +#ifndef __powerpc64__
3605 +
3606 +static inline unsigned long __must_check copy_from_user(void *to,
3607 + const void __user *from, unsigned long n)
3608 +{
3609 + unsigned long over;
3610 +
3611 + if ((long)n < 0)
3612 + return n;
3613 +
3614 + if (access_ok(VERIFY_READ, from, n)) {
3615 + if (!__builtin_constant_p(n))
3616 + check_object_size(to, n, false);
3617 + return __copy_tofrom_user((__force void __user *)to, from, n);
3618 + }
3619 + if ((unsigned long)from < TASK_SIZE) {
3620 + over = (unsigned long)from + n - TASK_SIZE;
3621 + if (!__builtin_constant_p(n - over))
3622 + check_object_size(to, n - over, false);
3623 + return __copy_tofrom_user((__force void __user *)to, from,
3624 + n - over) + over;
3625 + }
3626 + return n;
3627 +}
3628 +
3629 +static inline unsigned long __must_check copy_to_user(void __user *to,
3630 + const void *from, unsigned long n)
3631 +{
3632 + unsigned long over;
3633 +
3634 + if ((long)n < 0)
3635 + return n;
3636 +
3637 + if (access_ok(VERIFY_WRITE, to, n)) {
3638 + if (!__builtin_constant_p(n))
3639 + check_object_size(from, n, true);
3640 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3641 + }
3642 + if ((unsigned long)to < TASK_SIZE) {
3643 + over = (unsigned long)to + n - TASK_SIZE;
3644 + if (!__builtin_constant_p(n))
3645 + check_object_size(from, n - over, true);
3646 + return __copy_tofrom_user(to, (__force void __user *)from,
3647 + n - over) + over;
3648 + }
3649 + return n;
3650 +}
3651 +
3652 +#else /* __powerpc64__ */
3653 +
3654 +#define __copy_in_user(to, from, size) \
3655 + __copy_tofrom_user((to), (from), (size))
3656 +
3657 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3658 +{
3659 + if ((long)n < 0 || n > INT_MAX)
3660 + return n;
3661 +
3662 + if (!__builtin_constant_p(n))
3663 + check_object_size(to, n, false);
3664 +
3665 + if (likely(access_ok(VERIFY_READ, from, n)))
3666 + n = __copy_from_user(to, from, n);
3667 + else
3668 + memset(to, 0, n);
3669 + return n;
3670 +}
3671 +
3672 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3673 +{
3674 + if ((long)n < 0 || n > INT_MAX)
3675 + return n;
3676 +
3677 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3678 + if (!__builtin_constant_p(n))
3679 + check_object_size(from, n, true);
3680 + n = __copy_to_user(to, from, n);
3681 + }
3682 + return n;
3683 +}
3684 +
3685 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3686 + unsigned long n);
3687 +
3688 +#endif /* __powerpc64__ */
3689 +
3690 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3691
3692 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3693 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3694 index 429983c..7af363b 100644
3695 --- a/arch/powerpc/kernel/exceptions-64e.S
3696 +++ b/arch/powerpc/kernel/exceptions-64e.S
3697 @@ -587,6 +587,7 @@ storage_fault_common:
3698 std r14,_DAR(r1)
3699 std r15,_DSISR(r1)
3700 addi r3,r1,STACK_FRAME_OVERHEAD
3701 + bl .save_nvgprs
3702 mr r4,r14
3703 mr r5,r15
3704 ld r14,PACA_EXGEN+EX_R14(r13)
3705 @@ -596,8 +597,7 @@ storage_fault_common:
3706 cmpdi r3,0
3707 bne- 1f
3708 b .ret_from_except_lite
3709 -1: bl .save_nvgprs
3710 - mr r5,r3
3711 +1: mr r5,r3
3712 addi r3,r1,STACK_FRAME_OVERHEAD
3713 ld r4,_DAR(r1)
3714 bl .bad_page_fault
3715 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3716 index cf9c69b..ebc9640 100644
3717 --- a/arch/powerpc/kernel/exceptions-64s.S
3718 +++ b/arch/powerpc/kernel/exceptions-64s.S
3719 @@ -1004,10 +1004,10 @@ handle_page_fault:
3720 11: ld r4,_DAR(r1)
3721 ld r5,_DSISR(r1)
3722 addi r3,r1,STACK_FRAME_OVERHEAD
3723 + bl .save_nvgprs
3724 bl .do_page_fault
3725 cmpdi r3,0
3726 beq+ 13f
3727 - bl .save_nvgprs
3728 mr r5,r3
3729 addi r3,r1,STACK_FRAME_OVERHEAD
3730 lwz r4,_DAR(r1)
3731 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
3732 index 745c1e7..59d97a6 100644
3733 --- a/arch/powerpc/kernel/irq.c
3734 +++ b/arch/powerpc/kernel/irq.c
3735 @@ -547,9 +547,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
3736 host->ops = ops;
3737 host->of_node = of_node_get(of_node);
3738
3739 - if (host->ops->match == NULL)
3740 - host->ops->match = default_irq_host_match;
3741 -
3742 raw_spin_lock_irqsave(&irq_big_lock, flags);
3743
3744 /* If it's a legacy controller, check for duplicates and
3745 @@ -622,7 +619,12 @@ struct irq_host *irq_find_host(struct device_node *node)
3746 */
3747 raw_spin_lock_irqsave(&irq_big_lock, flags);
3748 list_for_each_entry(h, &irq_hosts, link)
3749 - if (h->ops->match(h, node)) {
3750 + if (h->ops->match) {
3751 + if (h->ops->match(h, node)) {
3752 + found = h;
3753 + break;
3754 + }
3755 + } else if (default_irq_host_match(h, node)) {
3756 found = h;
3757 break;
3758 }
3759 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3760 index 0b6d796..d760ddb 100644
3761 --- a/arch/powerpc/kernel/module_32.c
3762 +++ b/arch/powerpc/kernel/module_32.c
3763 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3764 me->arch.core_plt_section = i;
3765 }
3766 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3767 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3768 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3769 return -ENOEXEC;
3770 }
3771
3772 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
3773
3774 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3775 /* Init, or core PLT? */
3776 - if (location >= mod->module_core
3777 - && location < mod->module_core + mod->core_size)
3778 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3779 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3780 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3781 - else
3782 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3783 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3784 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3785 + else {
3786 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3787 + return ~0UL;
3788 + }
3789
3790 /* Find this entry, or if that fails, the next avail. entry */
3791 while (entry->jump[0]) {
3792 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3793 index 6457574..08b28d3 100644
3794 --- a/arch/powerpc/kernel/process.c
3795 +++ b/arch/powerpc/kernel/process.c
3796 @@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
3797 * Lookup NIP late so we have the best change of getting the
3798 * above info out without failing
3799 */
3800 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3801 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3802 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3803 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3804 #endif
3805 show_stack(current, (unsigned long *) regs->gpr[1]);
3806 if (!user_mode(regs))
3807 @@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3808 newsp = stack[0];
3809 ip = stack[STACK_FRAME_LR_SAVE];
3810 if (!firstframe || ip != lr) {
3811 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3812 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3813 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3814 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3815 - printk(" (%pS)",
3816 + printk(" (%pA)",
3817 (void *)current->ret_stack[curr_frame].ret);
3818 curr_frame--;
3819 }
3820 @@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3821 struct pt_regs *regs = (struct pt_regs *)
3822 (sp + STACK_FRAME_OVERHEAD);
3823 lr = regs->link;
3824 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
3825 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
3826 regs->trap, (void *)regs->nip, (void *)lr);
3827 firstframe = 1;
3828 }
3829 @@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
3830 }
3831
3832 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3833 -
3834 -unsigned long arch_align_stack(unsigned long sp)
3835 -{
3836 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3837 - sp -= get_random_int() & ~PAGE_MASK;
3838 - return sp & ~0xf;
3839 -}
3840 -
3841 -static inline unsigned long brk_rnd(void)
3842 -{
3843 - unsigned long rnd = 0;
3844 -
3845 - /* 8MB for 32bit, 1GB for 64bit */
3846 - if (is_32bit_task())
3847 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3848 - else
3849 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3850 -
3851 - return rnd << PAGE_SHIFT;
3852 -}
3853 -
3854 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3855 -{
3856 - unsigned long base = mm->brk;
3857 - unsigned long ret;
3858 -
3859 -#ifdef CONFIG_PPC_STD_MMU_64
3860 - /*
3861 - * If we are using 1TB segments and we are allowed to randomise
3862 - * the heap, we can put it above 1TB so it is backed by a 1TB
3863 - * segment. Otherwise the heap will be in the bottom 1TB
3864 - * which always uses 256MB segments and this may result in a
3865 - * performance penalty.
3866 - */
3867 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3868 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3869 -#endif
3870 -
3871 - ret = PAGE_ALIGN(base + brk_rnd());
3872 -
3873 - if (ret < mm->brk)
3874 - return mm->brk;
3875 -
3876 - return ret;
3877 -}
3878 -
3879 -unsigned long randomize_et_dyn(unsigned long base)
3880 -{
3881 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3882 -
3883 - if (ret < base)
3884 - return base;
3885 -
3886 - return ret;
3887 -}
3888 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3889 index 836a5a1..27289a3 100644
3890 --- a/arch/powerpc/kernel/signal_32.c
3891 +++ b/arch/powerpc/kernel/signal_32.c
3892 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3893 /* Save user registers on the stack */
3894 frame = &rt_sf->uc.uc_mcontext;
3895 addr = frame;
3896 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3897 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3898 if (save_user_regs(regs, frame, 0, 1))
3899 goto badframe;
3900 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3901 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3902 index a50b5ec..547078a 100644
3903 --- a/arch/powerpc/kernel/signal_64.c
3904 +++ b/arch/powerpc/kernel/signal_64.c
3905 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3906 current->thread.fpscr.val = 0;
3907
3908 /* Set up to return from userspace. */
3909 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3910 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3911 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3912 } else {
3913 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3914 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3915 index 5459d14..10f8070 100644
3916 --- a/arch/powerpc/kernel/traps.c
3917 +++ b/arch/powerpc/kernel/traps.c
3918 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
3919 static inline void pmac_backlight_unblank(void) { }
3920 #endif
3921
3922 +extern void gr_handle_kernel_exploit(void);
3923 +
3924 int die(const char *str, struct pt_regs *regs, long err)
3925 {
3926 static struct {
3927 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3928 if (panic_on_oops)
3929 panic("Fatal exception");
3930
3931 + gr_handle_kernel_exploit();
3932 +
3933 oops_exit();
3934 do_exit(err);
3935
3936 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3937 index 7d14bb6..1305601 100644
3938 --- a/arch/powerpc/kernel/vdso.c
3939 +++ b/arch/powerpc/kernel/vdso.c
3940 @@ -35,6 +35,7 @@
3941 #include <asm/firmware.h>
3942 #include <asm/vdso.h>
3943 #include <asm/vdso_datapage.h>
3944 +#include <asm/mman.h>
3945
3946 #include "setup.h"
3947
3948 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3949 vdso_base = VDSO32_MBASE;
3950 #endif
3951
3952 - current->mm->context.vdso_base = 0;
3953 + current->mm->context.vdso_base = ~0UL;
3954
3955 /* vDSO has a problem and was disabled, just don't "enable" it for the
3956 * process
3957 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3958 vdso_base = get_unmapped_area(NULL, vdso_base,
3959 (vdso_pages << PAGE_SHIFT) +
3960 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3961 - 0, 0);
3962 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
3963 if (IS_ERR_VALUE(vdso_base)) {
3964 rc = vdso_base;
3965 goto fail_mmapsem;
3966 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3967 index 5eea6f3..5d10396 100644
3968 --- a/arch/powerpc/lib/usercopy_64.c
3969 +++ b/arch/powerpc/lib/usercopy_64.c
3970 @@ -9,22 +9,6 @@
3971 #include <linux/module.h>
3972 #include <asm/uaccess.h>
3973
3974 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3975 -{
3976 - if (likely(access_ok(VERIFY_READ, from, n)))
3977 - n = __copy_from_user(to, from, n);
3978 - else
3979 - memset(to, 0, n);
3980 - return n;
3981 -}
3982 -
3983 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3984 -{
3985 - if (likely(access_ok(VERIFY_WRITE, to, n)))
3986 - n = __copy_to_user(to, from, n);
3987 - return n;
3988 -}
3989 -
3990 unsigned long copy_in_user(void __user *to, const void __user *from,
3991 unsigned long n)
3992 {
3993 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3994 return n;
3995 }
3996
3997 -EXPORT_SYMBOL(copy_from_user);
3998 -EXPORT_SYMBOL(copy_to_user);
3999 EXPORT_SYMBOL(copy_in_user);
4000
4001 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4002 index 5efe8c9..db9ceef 100644
4003 --- a/arch/powerpc/mm/fault.c
4004 +++ b/arch/powerpc/mm/fault.c
4005 @@ -32,6 +32,10 @@
4006 #include <linux/perf_event.h>
4007 #include <linux/magic.h>
4008 #include <linux/ratelimit.h>
4009 +#include <linux/slab.h>
4010 +#include <linux/pagemap.h>
4011 +#include <linux/compiler.h>
4012 +#include <linux/unistd.h>
4013
4014 #include <asm/firmware.h>
4015 #include <asm/page.h>
4016 @@ -43,6 +47,7 @@
4017 #include <asm/tlbflush.h>
4018 #include <asm/siginfo.h>
4019 #include <mm/mmu_decl.h>
4020 +#include <asm/ptrace.h>
4021
4022 #ifdef CONFIG_KPROBES
4023 static inline int notify_page_fault(struct pt_regs *regs)
4024 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4025 }
4026 #endif
4027
4028 +#ifdef CONFIG_PAX_PAGEEXEC
4029 +/*
4030 + * PaX: decide what to do with offenders (regs->nip = fault address)
4031 + *
4032 + * returns 1 when task should be killed
4033 + */
4034 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4035 +{
4036 + return 1;
4037 +}
4038 +
4039 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4040 +{
4041 + unsigned long i;
4042 +
4043 + printk(KERN_ERR "PAX: bytes at PC: ");
4044 + for (i = 0; i < 5; i++) {
4045 + unsigned int c;
4046 + if (get_user(c, (unsigned int __user *)pc+i))
4047 + printk(KERN_CONT "???????? ");
4048 + else
4049 + printk(KERN_CONT "%08x ", c);
4050 + }
4051 + printk("\n");
4052 +}
4053 +#endif
4054 +
4055 /*
4056 * Check whether the instruction at regs->nip is a store using
4057 * an update addressing form which will update r1.
4058 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4059 * indicate errors in DSISR but can validly be set in SRR1.
4060 */
4061 if (trap == 0x400)
4062 - error_code &= 0x48200000;
4063 + error_code &= 0x58200000;
4064 else
4065 is_write = error_code & DSISR_ISSTORE;
4066 #else
4067 @@ -259,7 +291,7 @@ good_area:
4068 * "undefined". Of those that can be set, this is the only
4069 * one which seems bad.
4070 */
4071 - if (error_code & 0x10000000)
4072 + if (error_code & DSISR_GUARDED)
4073 /* Guarded storage error. */
4074 goto bad_area;
4075 #endif /* CONFIG_8xx */
4076 @@ -274,7 +306,7 @@ good_area:
4077 * processors use the same I/D cache coherency mechanism
4078 * as embedded.
4079 */
4080 - if (error_code & DSISR_PROTFAULT)
4081 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4082 goto bad_area;
4083 #endif /* CONFIG_PPC_STD_MMU */
4084
4085 @@ -343,6 +375,23 @@ bad_area:
4086 bad_area_nosemaphore:
4087 /* User mode accesses cause a SIGSEGV */
4088 if (user_mode(regs)) {
4089 +
4090 +#ifdef CONFIG_PAX_PAGEEXEC
4091 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4092 +#ifdef CONFIG_PPC_STD_MMU
4093 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4094 +#else
4095 + if (is_exec && regs->nip == address) {
4096 +#endif
4097 + switch (pax_handle_fetch_fault(regs)) {
4098 + }
4099 +
4100 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4101 + do_group_exit(SIGKILL);
4102 + }
4103 + }
4104 +#endif
4105 +
4106 _exception(SIGSEGV, regs, code, address);
4107 return 0;
4108 }
4109 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4110 index 5a783d8..c23e14b 100644
4111 --- a/arch/powerpc/mm/mmap_64.c
4112 +++ b/arch/powerpc/mm/mmap_64.c
4113 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4114 */
4115 if (mmap_is_legacy()) {
4116 mm->mmap_base = TASK_UNMAPPED_BASE;
4117 +
4118 +#ifdef CONFIG_PAX_RANDMMAP
4119 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4120 + mm->mmap_base += mm->delta_mmap;
4121 +#endif
4122 +
4123 mm->get_unmapped_area = arch_get_unmapped_area;
4124 mm->unmap_area = arch_unmap_area;
4125 } else {
4126 mm->mmap_base = mmap_base();
4127 +
4128 +#ifdef CONFIG_PAX_RANDMMAP
4129 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4130 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4131 +#endif
4132 +
4133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4134 mm->unmap_area = arch_unmap_area_topdown;
4135 }
4136 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4137 index 73709f7..6b90313 100644
4138 --- a/arch/powerpc/mm/slice.c
4139 +++ b/arch/powerpc/mm/slice.c
4140 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4141 if ((mm->task_size - len) < addr)
4142 return 0;
4143 vma = find_vma(mm, addr);
4144 - return (!vma || (addr + len) <= vma->vm_start);
4145 + return check_heap_stack_gap(vma, addr, len);
4146 }
4147
4148 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4149 @@ -256,7 +256,7 @@ full_search:
4150 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4151 continue;
4152 }
4153 - if (!vma || addr + len <= vma->vm_start) {
4154 + if (check_heap_stack_gap(vma, addr, len)) {
4155 /*
4156 * Remember the place where we stopped the search:
4157 */
4158 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4159 }
4160 }
4161
4162 - addr = mm->mmap_base;
4163 - while (addr > len) {
4164 + if (mm->mmap_base < len)
4165 + addr = -ENOMEM;
4166 + else
4167 + addr = mm->mmap_base - len;
4168 +
4169 + while (!IS_ERR_VALUE(addr)) {
4170 /* Go down by chunk size */
4171 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4172 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4173
4174 /* Check for hit with different page size */
4175 mask = slice_range_to_mask(addr, len);
4176 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4177 * return with success:
4178 */
4179 vma = find_vma(mm, addr);
4180 - if (!vma || (addr + len) <= vma->vm_start) {
4181 + if (check_heap_stack_gap(vma, addr, len)) {
4182 /* remember the address as a hint for next time */
4183 if (use_cache)
4184 mm->free_area_cache = addr;
4185 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4186 mm->cached_hole_size = vma->vm_start - addr;
4187
4188 /* try just below the current vma->vm_start */
4189 - addr = vma->vm_start;
4190 + addr = skip_heap_stack_gap(vma, len);
4191 }
4192
4193 /*
4194 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4195 if (fixed && addr > (mm->task_size - len))
4196 return -EINVAL;
4197
4198 +#ifdef CONFIG_PAX_RANDMMAP
4199 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4200 + addr = 0;
4201 +#endif
4202 +
4203 /* If hint, make sure it matches our alignment restrictions */
4204 if (!fixed && addr) {
4205 addr = _ALIGN_UP(addr, 1ul << pshift);
4206 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4207 index 8517d2a..d2738d4 100644
4208 --- a/arch/s390/include/asm/atomic.h
4209 +++ b/arch/s390/include/asm/atomic.h
4210 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4211 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4212 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4213
4214 +#define atomic64_read_unchecked(v) atomic64_read(v)
4215 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4216 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4217 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4218 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4219 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4220 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4221 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4222 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4223 +
4224 #define smp_mb__before_atomic_dec() smp_mb()
4225 #define smp_mb__after_atomic_dec() smp_mb()
4226 #define smp_mb__before_atomic_inc() smp_mb()
4227 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4228 index 2a30d5a..5e5586f 100644
4229 --- a/arch/s390/include/asm/cache.h
4230 +++ b/arch/s390/include/asm/cache.h
4231 @@ -11,8 +11,10 @@
4232 #ifndef __ARCH_S390_CACHE_H
4233 #define __ARCH_S390_CACHE_H
4234
4235 -#define L1_CACHE_BYTES 256
4236 +#include <linux/const.h>
4237 +
4238 #define L1_CACHE_SHIFT 8
4239 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4240 #define NET_SKB_PAD 32
4241
4242 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4243 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4244 index 547f1a6..0b22b53 100644
4245 --- a/arch/s390/include/asm/elf.h
4246 +++ b/arch/s390/include/asm/elf.h
4247 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4248 the loader. We need to make sure that it is out of the way of the program
4249 that it will "exec", and that there is sufficient room for the brk. */
4250
4251 -extern unsigned long randomize_et_dyn(unsigned long base);
4252 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4253 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4254 +
4255 +#ifdef CONFIG_PAX_ASLR
4256 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4257 +
4258 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4259 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4260 +#endif
4261
4262 /* This yields a mask that user programs can use to figure out what
4263 instruction set this CPU supports. */
4264 @@ -211,7 +217,4 @@ struct linux_binprm;
4265 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4266 int arch_setup_additional_pages(struct linux_binprm *, int);
4267
4268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4269 -#define arch_randomize_brk arch_randomize_brk
4270 -
4271 #endif
4272 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4273 index ef573c1..75a1ce6 100644
4274 --- a/arch/s390/include/asm/system.h
4275 +++ b/arch/s390/include/asm/system.h
4276 @@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
4277 extern void (*_machine_halt)(void);
4278 extern void (*_machine_power_off)(void);
4279
4280 -extern unsigned long arch_align_stack(unsigned long sp);
4281 +#define arch_align_stack(x) ((x) & ~0xfUL)
4282
4283 static inline int tprot(unsigned long addr)
4284 {
4285 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4286 index 2b23885..e136e31 100644
4287 --- a/arch/s390/include/asm/uaccess.h
4288 +++ b/arch/s390/include/asm/uaccess.h
4289 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
4290 copy_to_user(void __user *to, const void *from, unsigned long n)
4291 {
4292 might_fault();
4293 +
4294 + if ((long)n < 0)
4295 + return n;
4296 +
4297 if (access_ok(VERIFY_WRITE, to, n))
4298 n = __copy_to_user(to, from, n);
4299 return n;
4300 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4301 static inline unsigned long __must_check
4302 __copy_from_user(void *to, const void __user *from, unsigned long n)
4303 {
4304 + if ((long)n < 0)
4305 + return n;
4306 +
4307 if (__builtin_constant_p(n) && (n <= 256))
4308 return uaccess.copy_from_user_small(n, from, to);
4309 else
4310 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4311 unsigned int sz = __compiletime_object_size(to);
4312
4313 might_fault();
4314 +
4315 + if ((long)n < 0)
4316 + return n;
4317 +
4318 if (unlikely(sz != -1 && sz < n)) {
4319 copy_from_user_overflow();
4320 return n;
4321 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4322 index dfcb343..eda788a 100644
4323 --- a/arch/s390/kernel/module.c
4324 +++ b/arch/s390/kernel/module.c
4325 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4326
4327 /* Increase core size by size of got & plt and set start
4328 offsets for got and plt. */
4329 - me->core_size = ALIGN(me->core_size, 4);
4330 - me->arch.got_offset = me->core_size;
4331 - me->core_size += me->arch.got_size;
4332 - me->arch.plt_offset = me->core_size;
4333 - me->core_size += me->arch.plt_size;
4334 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4335 + me->arch.got_offset = me->core_size_rw;
4336 + me->core_size_rw += me->arch.got_size;
4337 + me->arch.plt_offset = me->core_size_rx;
4338 + me->core_size_rx += me->arch.plt_size;
4339 return 0;
4340 }
4341
4342 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4343 if (info->got_initialized == 0) {
4344 Elf_Addr *gotent;
4345
4346 - gotent = me->module_core + me->arch.got_offset +
4347 + gotent = me->module_core_rw + me->arch.got_offset +
4348 info->got_offset;
4349 *gotent = val;
4350 info->got_initialized = 1;
4351 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4352 else if (r_type == R_390_GOTENT ||
4353 r_type == R_390_GOTPLTENT)
4354 *(unsigned int *) loc =
4355 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4356 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4357 else if (r_type == R_390_GOT64 ||
4358 r_type == R_390_GOTPLT64)
4359 *(unsigned long *) loc = val;
4360 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4361 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4362 if (info->plt_initialized == 0) {
4363 unsigned int *ip;
4364 - ip = me->module_core + me->arch.plt_offset +
4365 + ip = me->module_core_rx + me->arch.plt_offset +
4366 info->plt_offset;
4367 #ifndef CONFIG_64BIT
4368 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4369 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4370 val - loc + 0xffffUL < 0x1ffffeUL) ||
4371 (r_type == R_390_PLT32DBL &&
4372 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4373 - val = (Elf_Addr) me->module_core +
4374 + val = (Elf_Addr) me->module_core_rx +
4375 me->arch.plt_offset +
4376 info->plt_offset;
4377 val += rela->r_addend - loc;
4378 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4379 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4380 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4381 val = val + rela->r_addend -
4382 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4383 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4384 if (r_type == R_390_GOTOFF16)
4385 *(unsigned short *) loc = val;
4386 else if (r_type == R_390_GOTOFF32)
4387 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4388 break;
4389 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4390 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4391 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4392 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4393 rela->r_addend - loc;
4394 if (r_type == R_390_GOTPC)
4395 *(unsigned int *) loc = val;
4396 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4397 index 53088e2..9f44a36 100644
4398 --- a/arch/s390/kernel/process.c
4399 +++ b/arch/s390/kernel/process.c
4400 @@ -320,39 +320,3 @@ unsigned long get_wchan(struct task_struct *p)
4401 }
4402 return 0;
4403 }
4404 -
4405 -unsigned long arch_align_stack(unsigned long sp)
4406 -{
4407 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4408 - sp -= get_random_int() & ~PAGE_MASK;
4409 - return sp & ~0xf;
4410 -}
4411 -
4412 -static inline unsigned long brk_rnd(void)
4413 -{
4414 - /* 8MB for 32bit, 1GB for 64bit */
4415 - if (is_32bit_task())
4416 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4417 - else
4418 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4419 -}
4420 -
4421 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4422 -{
4423 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4424 -
4425 - if (ret < mm->brk)
4426 - return mm->brk;
4427 - return ret;
4428 -}
4429 -
4430 -unsigned long randomize_et_dyn(unsigned long base)
4431 -{
4432 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4433 -
4434 - if (!(current->flags & PF_RANDOMIZE))
4435 - return base;
4436 - if (ret < base)
4437 - return base;
4438 - return ret;
4439 -}
4440 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4441 index a0155c0..34cc491 100644
4442 --- a/arch/s390/mm/mmap.c
4443 +++ b/arch/s390/mm/mmap.c
4444 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4445 */
4446 if (mmap_is_legacy()) {
4447 mm->mmap_base = TASK_UNMAPPED_BASE;
4448 +
4449 +#ifdef CONFIG_PAX_RANDMMAP
4450 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4451 + mm->mmap_base += mm->delta_mmap;
4452 +#endif
4453 +
4454 mm->get_unmapped_area = arch_get_unmapped_area;
4455 mm->unmap_area = arch_unmap_area;
4456 } else {
4457 mm->mmap_base = mmap_base();
4458 +
4459 +#ifdef CONFIG_PAX_RANDMMAP
4460 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4461 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4462 +#endif
4463 +
4464 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4465 mm->unmap_area = arch_unmap_area_topdown;
4466 }
4467 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4468 */
4469 if (mmap_is_legacy()) {
4470 mm->mmap_base = TASK_UNMAPPED_BASE;
4471 +
4472 +#ifdef CONFIG_PAX_RANDMMAP
4473 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4474 + mm->mmap_base += mm->delta_mmap;
4475 +#endif
4476 +
4477 mm->get_unmapped_area = s390_get_unmapped_area;
4478 mm->unmap_area = arch_unmap_area;
4479 } else {
4480 mm->mmap_base = mmap_base();
4481 +
4482 +#ifdef CONFIG_PAX_RANDMMAP
4483 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4484 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4485 +#endif
4486 +
4487 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4488 mm->unmap_area = arch_unmap_area_topdown;
4489 }
4490 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4491 index ae3d59f..f65f075 100644
4492 --- a/arch/score/include/asm/cache.h
4493 +++ b/arch/score/include/asm/cache.h
4494 @@ -1,7 +1,9 @@
4495 #ifndef _ASM_SCORE_CACHE_H
4496 #define _ASM_SCORE_CACHE_H
4497
4498 +#include <linux/const.h>
4499 +
4500 #define L1_CACHE_SHIFT 4
4501 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4502 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4503
4504 #endif /* _ASM_SCORE_CACHE_H */
4505 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4506 index 589d5c7..669e274 100644
4507 --- a/arch/score/include/asm/system.h
4508 +++ b/arch/score/include/asm/system.h
4509 @@ -17,7 +17,7 @@ do { \
4510 #define finish_arch_switch(prev) do {} while (0)
4511
4512 typedef void (*vi_handler_t)(void);
4513 -extern unsigned long arch_align_stack(unsigned long sp);
4514 +#define arch_align_stack(x) (x)
4515
4516 #define mb() barrier()
4517 #define rmb() barrier()
4518 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4519 index 25d0803..d6c8e36 100644
4520 --- a/arch/score/kernel/process.c
4521 +++ b/arch/score/kernel/process.c
4522 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4523
4524 return task_pt_regs(task)->cp0_epc;
4525 }
4526 -
4527 -unsigned long arch_align_stack(unsigned long sp)
4528 -{
4529 - return sp;
4530 -}
4531 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4532 index ef9e555..331bd29 100644
4533 --- a/arch/sh/include/asm/cache.h
4534 +++ b/arch/sh/include/asm/cache.h
4535 @@ -9,10 +9,11 @@
4536 #define __ASM_SH_CACHE_H
4537 #ifdef __KERNEL__
4538
4539 +#include <linux/const.h>
4540 #include <linux/init.h>
4541 #include <cpu/cache.h>
4542
4543 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4544 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4545
4546 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4547
4548 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4549 index afeb710..d1d1289 100644
4550 --- a/arch/sh/mm/mmap.c
4551 +++ b/arch/sh/mm/mmap.c
4552 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4553 addr = PAGE_ALIGN(addr);
4554
4555 vma = find_vma(mm, addr);
4556 - if (TASK_SIZE - len >= addr &&
4557 - (!vma || addr + len <= vma->vm_start))
4558 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4559 return addr;
4560 }
4561
4562 @@ -106,7 +105,7 @@ full_search:
4563 }
4564 return -ENOMEM;
4565 }
4566 - if (likely(!vma || addr + len <= vma->vm_start)) {
4567 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4568 /*
4569 * Remember the place where we stopped the search:
4570 */
4571 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4572 addr = PAGE_ALIGN(addr);
4573
4574 vma = find_vma(mm, addr);
4575 - if (TASK_SIZE - len >= addr &&
4576 - (!vma || addr + len <= vma->vm_start))
4577 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4578 return addr;
4579 }
4580
4581 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4582 /* make sure it can fit in the remaining address space */
4583 if (likely(addr > len)) {
4584 vma = find_vma(mm, addr-len);
4585 - if (!vma || addr <= vma->vm_start) {
4586 + if (check_heap_stack_gap(vma, addr - len, len)) {
4587 /* remember the address as a hint for next time */
4588 return (mm->free_area_cache = addr-len);
4589 }
4590 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4591 if (unlikely(mm->mmap_base < len))
4592 goto bottomup;
4593
4594 - addr = mm->mmap_base-len;
4595 - if (do_colour_align)
4596 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4597 + addr = mm->mmap_base - len;
4598
4599 do {
4600 + if (do_colour_align)
4601 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4602 /*
4603 * Lookup failure means no vma is above this address,
4604 * else if new region fits below vma->vm_start,
4605 * return with success:
4606 */
4607 vma = find_vma(mm, addr);
4608 - if (likely(!vma || addr+len <= vma->vm_start)) {
4609 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4610 /* remember the address as a hint for next time */
4611 return (mm->free_area_cache = addr);
4612 }
4613 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4614 mm->cached_hole_size = vma->vm_start - addr;
4615
4616 /* try just below the current vma->vm_start */
4617 - addr = vma->vm_start-len;
4618 - if (do_colour_align)
4619 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4620 - } while (likely(len < vma->vm_start));
4621 + addr = skip_heap_stack_gap(vma, len);
4622 + } while (!IS_ERR_VALUE(addr));
4623
4624 bottomup:
4625 /*
4626 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4627 index f92602e..27060b2 100644
4628 --- a/arch/sparc/Kconfig
4629 +++ b/arch/sparc/Kconfig
4630 @@ -31,6 +31,7 @@ config SPARC
4631
4632 config SPARC32
4633 def_bool !64BIT
4634 + select GENERIC_ATOMIC64
4635
4636 config SPARC64
4637 def_bool 64BIT
4638 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4639 index ad1fb5d..fc5315b 100644
4640 --- a/arch/sparc/Makefile
4641 +++ b/arch/sparc/Makefile
4642 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4643 # Export what is needed by arch/sparc/boot/Makefile
4644 export VMLINUX_INIT VMLINUX_MAIN
4645 VMLINUX_INIT := $(head-y) $(init-y)
4646 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4647 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4648 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4649 VMLINUX_MAIN += $(drivers-y) $(net-y)
4650
4651 diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
4652 index 5c3c8b6..ba822fa 100644
4653 --- a/arch/sparc/include/asm/atomic_32.h
4654 +++ b/arch/sparc/include/asm/atomic_32.h
4655 @@ -13,6 +13,8 @@
4656
4657 #include <linux/types.h>
4658
4659 +#include <asm-generic/atomic64.h>
4660 +
4661 #ifdef __KERNEL__
4662
4663 #include <asm/system.h>
4664 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4665 index 9f421df..b81fc12 100644
4666 --- a/arch/sparc/include/asm/atomic_64.h
4667 +++ b/arch/sparc/include/asm/atomic_64.h
4668 @@ -14,18 +14,40 @@
4669 #define ATOMIC64_INIT(i) { (i) }
4670
4671 #define atomic_read(v) (*(volatile int *)&(v)->counter)
4672 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4673 +{
4674 + return v->counter;
4675 +}
4676 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
4677 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4678 +{
4679 + return v->counter;
4680 +}
4681
4682 #define atomic_set(v, i) (((v)->counter) = i)
4683 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4684 +{
4685 + v->counter = i;
4686 +}
4687 #define atomic64_set(v, i) (((v)->counter) = i)
4688 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4689 +{
4690 + v->counter = i;
4691 +}
4692
4693 extern void atomic_add(int, atomic_t *);
4694 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4695 extern void atomic64_add(long, atomic64_t *);
4696 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4697 extern void atomic_sub(int, atomic_t *);
4698 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4699 extern void atomic64_sub(long, atomic64_t *);
4700 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4701
4702 extern int atomic_add_ret(int, atomic_t *);
4703 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4704 extern long atomic64_add_ret(long, atomic64_t *);
4705 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4706 extern int atomic_sub_ret(int, atomic_t *);
4707 extern long atomic64_sub_ret(long, atomic64_t *);
4708
4709 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4710 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4711
4712 #define atomic_inc_return(v) atomic_add_ret(1, v)
4713 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4714 +{
4715 + return atomic_add_ret_unchecked(1, v);
4716 +}
4717 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4718 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4719 +{
4720 + return atomic64_add_ret_unchecked(1, v);
4721 +}
4722
4723 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4724 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4725
4726 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4727 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4728 +{
4729 + return atomic_add_ret_unchecked(i, v);
4730 +}
4731 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4732 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4733 +{
4734 + return atomic64_add_ret_unchecked(i, v);
4735 +}
4736
4737 /*
4738 * atomic_inc_and_test - increment and test
4739 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4740 * other cases.
4741 */
4742 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4743 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4744 +{
4745 + return atomic_inc_return_unchecked(v) == 0;
4746 +}
4747 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4748
4749 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4750 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4751 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4752
4753 #define atomic_inc(v) atomic_add(1, v)
4754 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4755 +{
4756 + atomic_add_unchecked(1, v);
4757 +}
4758 #define atomic64_inc(v) atomic64_add(1, v)
4759 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4760 +{
4761 + atomic64_add_unchecked(1, v);
4762 +}
4763
4764 #define atomic_dec(v) atomic_sub(1, v)
4765 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4766 +{
4767 + atomic_sub_unchecked(1, v);
4768 +}
4769 #define atomic64_dec(v) atomic64_sub(1, v)
4770 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4771 +{
4772 + atomic64_sub_unchecked(1, v);
4773 +}
4774
4775 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4776 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4777
4778 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4779 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4780 +{
4781 + return cmpxchg(&v->counter, old, new);
4782 +}
4783 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4784 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4785 +{
4786 + return xchg(&v->counter, new);
4787 +}
4788
4789 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4790 {
4791 - int c, old;
4792 + int c, old, new;
4793 c = atomic_read(v);
4794 for (;;) {
4795 - if (unlikely(c == (u)))
4796 + if (unlikely(c == u))
4797 break;
4798 - old = atomic_cmpxchg((v), c, c + (a));
4799 +
4800 + asm volatile("addcc %2, %0, %0\n"
4801 +
4802 +#ifdef CONFIG_PAX_REFCOUNT
4803 + "tvs %%icc, 6\n"
4804 +#endif
4805 +
4806 + : "=r" (new)
4807 + : "0" (c), "ir" (a)
4808 + : "cc");
4809 +
4810 + old = atomic_cmpxchg(v, c, new);
4811 if (likely(old == c))
4812 break;
4813 c = old;
4814 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4815 #define atomic64_cmpxchg(v, o, n) \
4816 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4817 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4818 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4819 +{
4820 + return xchg(&v->counter, new);
4821 +}
4822
4823 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4824 {
4825 - long c, old;
4826 + long c, old, new;
4827 c = atomic64_read(v);
4828 for (;;) {
4829 - if (unlikely(c == (u)))
4830 + if (unlikely(c == u))
4831 break;
4832 - old = atomic64_cmpxchg((v), c, c + (a));
4833 +
4834 + asm volatile("addcc %2, %0, %0\n"
4835 +
4836 +#ifdef CONFIG_PAX_REFCOUNT
4837 + "tvs %%xcc, 6\n"
4838 +#endif
4839 +
4840 + : "=r" (new)
4841 + : "0" (c), "ir" (a)
4842 + : "cc");
4843 +
4844 + old = atomic64_cmpxchg(v, c, new);
4845 if (likely(old == c))
4846 break;
4847 c = old;
4848 }
4849 - return c != (u);
4850 + return c != u;
4851 }
4852
4853 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4854 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4855 index 69358b5..9d0d492 100644
4856 --- a/arch/sparc/include/asm/cache.h
4857 +++ b/arch/sparc/include/asm/cache.h
4858 @@ -7,10 +7,12 @@
4859 #ifndef _SPARC_CACHE_H
4860 #define _SPARC_CACHE_H
4861
4862 +#include <linux/const.h>
4863 +
4864 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
4865
4866 #define L1_CACHE_SHIFT 5
4867 -#define L1_CACHE_BYTES 32
4868 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4869
4870 #ifdef CONFIG_SPARC32
4871 #define SMP_CACHE_BYTES_SHIFT 5
4872 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4873 index 4269ca6..e3da77f 100644
4874 --- a/arch/sparc/include/asm/elf_32.h
4875 +++ b/arch/sparc/include/asm/elf_32.h
4876 @@ -114,6 +114,13 @@ typedef struct {
4877
4878 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4879
4880 +#ifdef CONFIG_PAX_ASLR
4881 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
4882 +
4883 +#define PAX_DELTA_MMAP_LEN 16
4884 +#define PAX_DELTA_STACK_LEN 16
4885 +#endif
4886 +
4887 /* This yields a mask that user programs can use to figure out what
4888 instruction set this cpu supports. This can NOT be done in userspace
4889 on Sparc. */
4890 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4891 index 7df8b7f..4946269 100644
4892 --- a/arch/sparc/include/asm/elf_64.h
4893 +++ b/arch/sparc/include/asm/elf_64.h
4894 @@ -180,6 +180,13 @@ typedef struct {
4895 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4896 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4897
4898 +#ifdef CONFIG_PAX_ASLR
4899 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4900 +
4901 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4902 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4903 +#endif
4904 +
4905 extern unsigned long sparc64_elf_hwcap;
4906 #define ELF_HWCAP sparc64_elf_hwcap
4907
4908 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
4909 index 156707b..aefa786 100644
4910 --- a/arch/sparc/include/asm/page_32.h
4911 +++ b/arch/sparc/include/asm/page_32.h
4912 @@ -8,6 +8,8 @@
4913 #ifndef _SPARC_PAGE_H
4914 #define _SPARC_PAGE_H
4915
4916 +#include <linux/const.h>
4917 +
4918 #define PAGE_SHIFT 12
4919
4920 #ifndef __ASSEMBLY__
4921 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4922 index a790cc6..091ed94 100644
4923 --- a/arch/sparc/include/asm/pgtable_32.h
4924 +++ b/arch/sparc/include/asm/pgtable_32.h
4925 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4926 BTFIXUPDEF_INT(page_none)
4927 BTFIXUPDEF_INT(page_copy)
4928 BTFIXUPDEF_INT(page_readonly)
4929 +
4930 +#ifdef CONFIG_PAX_PAGEEXEC
4931 +BTFIXUPDEF_INT(page_shared_noexec)
4932 +BTFIXUPDEF_INT(page_copy_noexec)
4933 +BTFIXUPDEF_INT(page_readonly_noexec)
4934 +#endif
4935 +
4936 BTFIXUPDEF_INT(page_kernel)
4937
4938 #define PMD_SHIFT SUN4C_PMD_SHIFT
4939 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
4940 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4941 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4942
4943 +#ifdef CONFIG_PAX_PAGEEXEC
4944 +extern pgprot_t PAGE_SHARED_NOEXEC;
4945 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4946 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4947 +#else
4948 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4949 +# define PAGE_COPY_NOEXEC PAGE_COPY
4950 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4951 +#endif
4952 +
4953 extern unsigned long page_kernel;
4954
4955 #ifdef MODULE
4956 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4957 index f6ae2b2..b03ffc7 100644
4958 --- a/arch/sparc/include/asm/pgtsrmmu.h
4959 +++ b/arch/sparc/include/asm/pgtsrmmu.h
4960 @@ -115,6 +115,13 @@
4961 SRMMU_EXEC | SRMMU_REF)
4962 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4963 SRMMU_EXEC | SRMMU_REF)
4964 +
4965 +#ifdef CONFIG_PAX_PAGEEXEC
4966 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4967 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4968 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4969 +#endif
4970 +
4971 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4972 SRMMU_DIRTY | SRMMU_REF)
4973
4974 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4975 index 9689176..63c18ea 100644
4976 --- a/arch/sparc/include/asm/spinlock_64.h
4977 +++ b/arch/sparc/include/asm/spinlock_64.h
4978 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
4979
4980 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4981
4982 -static void inline arch_read_lock(arch_rwlock_t *lock)
4983 +static inline void arch_read_lock(arch_rwlock_t *lock)
4984 {
4985 unsigned long tmp1, tmp2;
4986
4987 __asm__ __volatile__ (
4988 "1: ldsw [%2], %0\n"
4989 " brlz,pn %0, 2f\n"
4990 -"4: add %0, 1, %1\n"
4991 +"4: addcc %0, 1, %1\n"
4992 +
4993 +#ifdef CONFIG_PAX_REFCOUNT
4994 +" tvs %%icc, 6\n"
4995 +#endif
4996 +
4997 " cas [%2], %0, %1\n"
4998 " cmp %0, %1\n"
4999 " bne,pn %%icc, 1b\n"
5000 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5001 " .previous"
5002 : "=&r" (tmp1), "=&r" (tmp2)
5003 : "r" (lock)
5004 - : "memory");
5005 + : "memory", "cc");
5006 }
5007
5008 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5009 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5010 {
5011 int tmp1, tmp2;
5012
5013 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5014 "1: ldsw [%2], %0\n"
5015 " brlz,a,pn %0, 2f\n"
5016 " mov 0, %0\n"
5017 -" add %0, 1, %1\n"
5018 +" addcc %0, 1, %1\n"
5019 +
5020 +#ifdef CONFIG_PAX_REFCOUNT
5021 +" tvs %%icc, 6\n"
5022 +#endif
5023 +
5024 " cas [%2], %0, %1\n"
5025 " cmp %0, %1\n"
5026 " bne,pn %%icc, 1b\n"
5027 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5028 return tmp1;
5029 }
5030
5031 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5032 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5033 {
5034 unsigned long tmp1, tmp2;
5035
5036 __asm__ __volatile__(
5037 "1: lduw [%2], %0\n"
5038 -" sub %0, 1, %1\n"
5039 +" subcc %0, 1, %1\n"
5040 +
5041 +#ifdef CONFIG_PAX_REFCOUNT
5042 +" tvs %%icc, 6\n"
5043 +#endif
5044 +
5045 " cas [%2], %0, %1\n"
5046 " cmp %0, %1\n"
5047 " bne,pn %%xcc, 1b\n"
5048 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5049 : "memory");
5050 }
5051
5052 -static void inline arch_write_lock(arch_rwlock_t *lock)
5053 +static inline void arch_write_lock(arch_rwlock_t *lock)
5054 {
5055 unsigned long mask, tmp1, tmp2;
5056
5057 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5058 : "memory");
5059 }
5060
5061 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5062 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5063 {
5064 __asm__ __volatile__(
5065 " stw %%g0, [%0]"
5066 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5067 : "memory");
5068 }
5069
5070 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5071 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5072 {
5073 unsigned long mask, tmp1, tmp2, result;
5074
5075 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5076 index fa57532..e1a4c53 100644
5077 --- a/arch/sparc/include/asm/thread_info_32.h
5078 +++ b/arch/sparc/include/asm/thread_info_32.h
5079 @@ -50,6 +50,8 @@ struct thread_info {
5080 unsigned long w_saved;
5081
5082 struct restart_block restart_block;
5083 +
5084 + unsigned long lowest_stack;
5085 };
5086
5087 /*
5088 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5089 index 60d86be..952dea1 100644
5090 --- a/arch/sparc/include/asm/thread_info_64.h
5091 +++ b/arch/sparc/include/asm/thread_info_64.h
5092 @@ -63,6 +63,8 @@ struct thread_info {
5093 struct pt_regs *kern_una_regs;
5094 unsigned int kern_una_insn;
5095
5096 + unsigned long lowest_stack;
5097 +
5098 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5099 };
5100
5101 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5102 index e88fbe5..96b0ce5 100644
5103 --- a/arch/sparc/include/asm/uaccess.h
5104 +++ b/arch/sparc/include/asm/uaccess.h
5105 @@ -1,5 +1,13 @@
5106 #ifndef ___ASM_SPARC_UACCESS_H
5107 #define ___ASM_SPARC_UACCESS_H
5108 +
5109 +#ifdef __KERNEL__
5110 +#ifndef __ASSEMBLY__
5111 +#include <linux/types.h>
5112 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5113 +#endif
5114 +#endif
5115 +
5116 #if defined(__sparc__) && defined(__arch64__)
5117 #include <asm/uaccess_64.h>
5118 #else
5119 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5120 index 8303ac4..07f333d 100644
5121 --- a/arch/sparc/include/asm/uaccess_32.h
5122 +++ b/arch/sparc/include/asm/uaccess_32.h
5123 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5124
5125 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5126 {
5127 - if (n && __access_ok((unsigned long) to, n))
5128 + if ((long)n < 0)
5129 + return n;
5130 +
5131 + if (n && __access_ok((unsigned long) to, n)) {
5132 + if (!__builtin_constant_p(n))
5133 + check_object_size(from, n, true);
5134 return __copy_user(to, (__force void __user *) from, n);
5135 - else
5136 + } else
5137 return n;
5138 }
5139
5140 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5141 {
5142 + if ((long)n < 0)
5143 + return n;
5144 +
5145 + if (!__builtin_constant_p(n))
5146 + check_object_size(from, n, true);
5147 +
5148 return __copy_user(to, (__force void __user *) from, n);
5149 }
5150
5151 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5152 {
5153 - if (n && __access_ok((unsigned long) from, n))
5154 + if ((long)n < 0)
5155 + return n;
5156 +
5157 + if (n && __access_ok((unsigned long) from, n)) {
5158 + if (!__builtin_constant_p(n))
5159 + check_object_size(to, n, false);
5160 return __copy_user((__force void __user *) to, from, n);
5161 - else
5162 + } else
5163 return n;
5164 }
5165
5166 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5167 {
5168 + if ((long)n < 0)
5169 + return n;
5170 +
5171 return __copy_user((__force void __user *) to, from, n);
5172 }
5173
5174 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5175 index 3e1449f..5293a0e 100644
5176 --- a/arch/sparc/include/asm/uaccess_64.h
5177 +++ b/arch/sparc/include/asm/uaccess_64.h
5178 @@ -10,6 +10,7 @@
5179 #include <linux/compiler.h>
5180 #include <linux/string.h>
5181 #include <linux/thread_info.h>
5182 +#include <linux/kernel.h>
5183 #include <asm/asi.h>
5184 #include <asm/system.h>
5185 #include <asm/spitfire.h>
5186 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5187 static inline unsigned long __must_check
5188 copy_from_user(void *to, const void __user *from, unsigned long size)
5189 {
5190 - unsigned long ret = ___copy_from_user(to, from, size);
5191 + unsigned long ret;
5192
5193 + if ((long)size < 0 || size > INT_MAX)
5194 + return size;
5195 +
5196 + if (!__builtin_constant_p(size))
5197 + check_object_size(to, size, false);
5198 +
5199 + ret = ___copy_from_user(to, from, size);
5200 if (unlikely(ret))
5201 ret = copy_from_user_fixup(to, from, size);
5202
5203 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5204 static inline unsigned long __must_check
5205 copy_to_user(void __user *to, const void *from, unsigned long size)
5206 {
5207 - unsigned long ret = ___copy_to_user(to, from, size);
5208 + unsigned long ret;
5209
5210 + if ((long)size < 0 || size > INT_MAX)
5211 + return size;
5212 +
5213 + if (!__builtin_constant_p(size))
5214 + check_object_size(from, size, true);
5215 +
5216 + ret = ___copy_to_user(to, from, size);
5217 if (unlikely(ret))
5218 ret = copy_to_user_fixup(to, from, size);
5219 return ret;
5220 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5221 index cb85458..e063f17 100644
5222 --- a/arch/sparc/kernel/Makefile
5223 +++ b/arch/sparc/kernel/Makefile
5224 @@ -3,7 +3,7 @@
5225 #
5226
5227 asflags-y := -ansi
5228 -ccflags-y := -Werror
5229 +#ccflags-y := -Werror
5230
5231 extra-y := head_$(BITS).o
5232 extra-y += init_task.o
5233 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5234 index f793742..4d880af 100644
5235 --- a/arch/sparc/kernel/process_32.c
5236 +++ b/arch/sparc/kernel/process_32.c
5237 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5238 rw->ins[4], rw->ins[5],
5239 rw->ins[6],
5240 rw->ins[7]);
5241 - printk("%pS\n", (void *) rw->ins[7]);
5242 + printk("%pA\n", (void *) rw->ins[7]);
5243 rw = (struct reg_window32 *) rw->ins[6];
5244 }
5245 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5246 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5247
5248 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5249 r->psr, r->pc, r->npc, r->y, print_tainted());
5250 - printk("PC: <%pS>\n", (void *) r->pc);
5251 + printk("PC: <%pA>\n", (void *) r->pc);
5252 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5253 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5254 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5255 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5256 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5257 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5258 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5259 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5260
5261 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5262 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5263 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5264 rw = (struct reg_window32 *) fp;
5265 pc = rw->ins[7];
5266 printk("[%08lx : ", pc);
5267 - printk("%pS ] ", (void *) pc);
5268 + printk("%pA ] ", (void *) pc);
5269 fp = rw->ins[6];
5270 } while (++count < 16);
5271 printk("\n");
5272 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5273 index 3739a06..48b2ff0 100644
5274 --- a/arch/sparc/kernel/process_64.c
5275 +++ b/arch/sparc/kernel/process_64.c
5276 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5277 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5278 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5279 if (regs->tstate & TSTATE_PRIV)
5280 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5281 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5282 }
5283
5284 void show_regs(struct pt_regs *regs)
5285 {
5286 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5287 regs->tpc, regs->tnpc, regs->y, print_tainted());
5288 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5289 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5290 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5291 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5292 regs->u_regs[3]);
5293 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5294 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5295 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5296 regs->u_regs[15]);
5297 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5298 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5299 show_regwindow(regs);
5300 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5301 }
5302 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
5303 ((tp && tp->task) ? tp->task->pid : -1));
5304
5305 if (gp->tstate & TSTATE_PRIV) {
5306 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5307 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5308 (void *) gp->tpc,
5309 (void *) gp->o7,
5310 (void *) gp->i7,
5311 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5312 index 42b282f..28ce9f2 100644
5313 --- a/arch/sparc/kernel/sys_sparc_32.c
5314 +++ b/arch/sparc/kernel/sys_sparc_32.c
5315 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5316 if (ARCH_SUN4C && len > 0x20000000)
5317 return -ENOMEM;
5318 if (!addr)
5319 - addr = TASK_UNMAPPED_BASE;
5320 + addr = current->mm->mmap_base;
5321
5322 if (flags & MAP_SHARED)
5323 addr = COLOUR_ALIGN(addr);
5324 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5325 }
5326 if (TASK_SIZE - PAGE_SIZE - len < addr)
5327 return -ENOMEM;
5328 - if (!vmm || addr + len <= vmm->vm_start)
5329 + if (check_heap_stack_gap(vmm, addr, len))
5330 return addr;
5331 addr = vmm->vm_end;
5332 if (flags & MAP_SHARED)
5333 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5334 index 441521a..b767073 100644
5335 --- a/arch/sparc/kernel/sys_sparc_64.c
5336 +++ b/arch/sparc/kernel/sys_sparc_64.c
5337 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5338 /* We do not accept a shared mapping if it would violate
5339 * cache aliasing constraints.
5340 */
5341 - if ((flags & MAP_SHARED) &&
5342 + if ((filp || (flags & MAP_SHARED)) &&
5343 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5344 return -EINVAL;
5345 return addr;
5346 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5347 if (filp || (flags & MAP_SHARED))
5348 do_color_align = 1;
5349
5350 +#ifdef CONFIG_PAX_RANDMMAP
5351 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5352 +#endif
5353 +
5354 if (addr) {
5355 if (do_color_align)
5356 addr = COLOUR_ALIGN(addr, pgoff);
5357 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5358 addr = PAGE_ALIGN(addr);
5359
5360 vma = find_vma(mm, addr);
5361 - if (task_size - len >= addr &&
5362 - (!vma || addr + len <= vma->vm_start))
5363 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5364 return addr;
5365 }
5366
5367 if (len > mm->cached_hole_size) {
5368 - start_addr = addr = mm->free_area_cache;
5369 + start_addr = addr = mm->free_area_cache;
5370 } else {
5371 - start_addr = addr = TASK_UNMAPPED_BASE;
5372 + start_addr = addr = mm->mmap_base;
5373 mm->cached_hole_size = 0;
5374 }
5375
5376 @@ -174,14 +177,14 @@ full_search:
5377 vma = find_vma(mm, VA_EXCLUDE_END);
5378 }
5379 if (unlikely(task_size < addr)) {
5380 - if (start_addr != TASK_UNMAPPED_BASE) {
5381 - start_addr = addr = TASK_UNMAPPED_BASE;
5382 + if (start_addr != mm->mmap_base) {
5383 + start_addr = addr = mm->mmap_base;
5384 mm->cached_hole_size = 0;
5385 goto full_search;
5386 }
5387 return -ENOMEM;
5388 }
5389 - if (likely(!vma || addr + len <= vma->vm_start)) {
5390 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5391 /*
5392 * Remember the place where we stopped the search:
5393 */
5394 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5395 /* We do not accept a shared mapping if it would violate
5396 * cache aliasing constraints.
5397 */
5398 - if ((flags & MAP_SHARED) &&
5399 + if ((filp || (flags & MAP_SHARED)) &&
5400 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5401 return -EINVAL;
5402 return addr;
5403 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5404 addr = PAGE_ALIGN(addr);
5405
5406 vma = find_vma(mm, addr);
5407 - if (task_size - len >= addr &&
5408 - (!vma || addr + len <= vma->vm_start))
5409 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5410 return addr;
5411 }
5412
5413 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 /* make sure it can fit in the remaining address space */
5415 if (likely(addr > len)) {
5416 vma = find_vma(mm, addr-len);
5417 - if (!vma || addr <= vma->vm_start) {
5418 + if (check_heap_stack_gap(vma, addr - len, len)) {
5419 /* remember the address as a hint for next time */
5420 return (mm->free_area_cache = addr-len);
5421 }
5422 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5423 if (unlikely(mm->mmap_base < len))
5424 goto bottomup;
5425
5426 - addr = mm->mmap_base-len;
5427 - if (do_color_align)
5428 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5429 + addr = mm->mmap_base - len;
5430
5431 do {
5432 + if (do_color_align)
5433 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5434 /*
5435 * Lookup failure means no vma is above this address,
5436 * else if new region fits below vma->vm_start,
5437 * return with success:
5438 */
5439 vma = find_vma(mm, addr);
5440 - if (likely(!vma || addr+len <= vma->vm_start)) {
5441 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5442 /* remember the address as a hint for next time */
5443 return (mm->free_area_cache = addr);
5444 }
5445 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5446 mm->cached_hole_size = vma->vm_start - addr;
5447
5448 /* try just below the current vma->vm_start */
5449 - addr = vma->vm_start-len;
5450 - if (do_color_align)
5451 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5452 - } while (likely(len < vma->vm_start));
5453 + addr = skip_heap_stack_gap(vma, len);
5454 + } while (!IS_ERR_VALUE(addr));
5455
5456 bottomup:
5457 /*
5458 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5459 gap == RLIM_INFINITY ||
5460 sysctl_legacy_va_layout) {
5461 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5462 +
5463 +#ifdef CONFIG_PAX_RANDMMAP
5464 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5465 + mm->mmap_base += mm->delta_mmap;
5466 +#endif
5467 +
5468 mm->get_unmapped_area = arch_get_unmapped_area;
5469 mm->unmap_area = arch_unmap_area;
5470 } else {
5471 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5472 gap = (task_size / 6 * 5);
5473
5474 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5475 +
5476 +#ifdef CONFIG_PAX_RANDMMAP
5477 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5478 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5479 +#endif
5480 +
5481 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5482 mm->unmap_area = arch_unmap_area_topdown;
5483 }
5484 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5485 index 591f20c..0f1b925 100644
5486 --- a/arch/sparc/kernel/traps_32.c
5487 +++ b/arch/sparc/kernel/traps_32.c
5488 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
5489 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5490 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5491
5492 +extern void gr_handle_kernel_exploit(void);
5493 +
5494 void die_if_kernel(char *str, struct pt_regs *regs)
5495 {
5496 static int die_counter;
5497 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5498 count++ < 30 &&
5499 (((unsigned long) rw) >= PAGE_OFFSET) &&
5500 !(((unsigned long) rw) & 0x7)) {
5501 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5502 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5503 (void *) rw->ins[7]);
5504 rw = (struct reg_window32 *)rw->ins[6];
5505 }
5506 }
5507 printk("Instruction DUMP:");
5508 instruction_dump ((unsigned long *) regs->pc);
5509 - if(regs->psr & PSR_PS)
5510 + if(regs->psr & PSR_PS) {
5511 + gr_handle_kernel_exploit();
5512 do_exit(SIGKILL);
5513 + }
5514 do_exit(SIGSEGV);
5515 }
5516
5517 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5518 index 0cbdaa4..438e4c9 100644
5519 --- a/arch/sparc/kernel/traps_64.c
5520 +++ b/arch/sparc/kernel/traps_64.c
5521 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5522 i + 1,
5523 p->trapstack[i].tstate, p->trapstack[i].tpc,
5524 p->trapstack[i].tnpc, p->trapstack[i].tt);
5525 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5526 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5527 }
5528 }
5529
5530 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5531
5532 lvl -= 0x100;
5533 if (regs->tstate & TSTATE_PRIV) {
5534 +
5535 +#ifdef CONFIG_PAX_REFCOUNT
5536 + if (lvl == 6)
5537 + pax_report_refcount_overflow(regs);
5538 +#endif
5539 +
5540 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5541 die_if_kernel(buffer, regs);
5542 }
5543 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5544 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5545 {
5546 char buffer[32];
5547 -
5548 +
5549 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5550 0, lvl, SIGTRAP) == NOTIFY_STOP)
5551 return;
5552
5553 +#ifdef CONFIG_PAX_REFCOUNT
5554 + if (lvl == 6)
5555 + pax_report_refcount_overflow(regs);
5556 +#endif
5557 +
5558 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5559
5560 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5561 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5562 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5563 printk("%s" "ERROR(%d): ",
5564 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5565 - printk("TPC<%pS>\n", (void *) regs->tpc);
5566 + printk("TPC<%pA>\n", (void *) regs->tpc);
5567 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5568 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5569 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5570 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5571 smp_processor_id(),
5572 (type & 0x1) ? 'I' : 'D',
5573 regs->tpc);
5574 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5575 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5576 panic("Irrecoverable Cheetah+ parity error.");
5577 }
5578
5579 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5580 smp_processor_id(),
5581 (type & 0x1) ? 'I' : 'D',
5582 regs->tpc);
5583 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5584 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5585 }
5586
5587 struct sun4v_error_entry {
5588 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5589
5590 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5591 regs->tpc, tl);
5592 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5593 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5594 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5595 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5596 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5597 (void *) regs->u_regs[UREG_I7]);
5598 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5599 "pte[%lx] error[%lx]\n",
5600 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5601
5602 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5603 regs->tpc, tl);
5604 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5605 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5606 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5607 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5608 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5609 (void *) regs->u_regs[UREG_I7]);
5610 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5611 "pte[%lx] error[%lx]\n",
5612 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5613 fp = (unsigned long)sf->fp + STACK_BIAS;
5614 }
5615
5616 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5617 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5618 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5619 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
5620 int index = tsk->curr_ret_stack;
5621 if (tsk->ret_stack && index >= graph) {
5622 pc = tsk->ret_stack[index - graph].ret;
5623 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5624 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5625 graph++;
5626 }
5627 }
5628 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5629 return (struct reg_window *) (fp + STACK_BIAS);
5630 }
5631
5632 +extern void gr_handle_kernel_exploit(void);
5633 +
5634 void die_if_kernel(char *str, struct pt_regs *regs)
5635 {
5636 static int die_counter;
5637 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5638 while (rw &&
5639 count++ < 30 &&
5640 kstack_valid(tp, (unsigned long) rw)) {
5641 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5642 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5643 (void *) rw->ins[7]);
5644
5645 rw = kernel_stack_up(rw);
5646 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5647 }
5648 user_instruction_dump ((unsigned int __user *) regs->tpc);
5649 }
5650 - if (regs->tstate & TSTATE_PRIV)
5651 + if (regs->tstate & TSTATE_PRIV) {
5652 + gr_handle_kernel_exploit();
5653 do_exit(SIGKILL);
5654 + }
5655 do_exit(SIGSEGV);
5656 }
5657 EXPORT_SYMBOL(die_if_kernel);
5658 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5659 index 76e4ac1..78f8bb1 100644
5660 --- a/arch/sparc/kernel/unaligned_64.c
5661 +++ b/arch/sparc/kernel/unaligned_64.c
5662 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
5663 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
5664
5665 if (__ratelimit(&ratelimit)) {
5666 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5667 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5668 regs->tpc, (void *) regs->tpc);
5669 }
5670 }
5671 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5672 index a3fc437..fea9957 100644
5673 --- a/arch/sparc/lib/Makefile
5674 +++ b/arch/sparc/lib/Makefile
5675 @@ -2,7 +2,7 @@
5676 #
5677
5678 asflags-y := -ansi -DST_DIV0=0x02
5679 -ccflags-y := -Werror
5680 +#ccflags-y := -Werror
5681
5682 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5683 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5684 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5685 index 59186e0..f747d7a 100644
5686 --- a/arch/sparc/lib/atomic_64.S
5687 +++ b/arch/sparc/lib/atomic_64.S
5688 @@ -18,7 +18,12 @@
5689 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5690 BACKOFF_SETUP(%o2)
5691 1: lduw [%o1], %g1
5692 - add %g1, %o0, %g7
5693 + addcc %g1, %o0, %g7
5694 +
5695 +#ifdef CONFIG_PAX_REFCOUNT
5696 + tvs %icc, 6
5697 +#endif
5698 +
5699 cas [%o1], %g1, %g7
5700 cmp %g1, %g7
5701 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5702 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5703 2: BACKOFF_SPIN(%o2, %o3, 1b)
5704 .size atomic_add, .-atomic_add
5705
5706 + .globl atomic_add_unchecked
5707 + .type atomic_add_unchecked,#function
5708 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5709 + BACKOFF_SETUP(%o2)
5710 +1: lduw [%o1], %g1
5711 + add %g1, %o0, %g7
5712 + cas [%o1], %g1, %g7
5713 + cmp %g1, %g7
5714 + bne,pn %icc, 2f
5715 + nop
5716 + retl
5717 + nop
5718 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5719 + .size atomic_add_unchecked, .-atomic_add_unchecked
5720 +
5721 .globl atomic_sub
5722 .type atomic_sub,#function
5723 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5724 BACKOFF_SETUP(%o2)
5725 1: lduw [%o1], %g1
5726 - sub %g1, %o0, %g7
5727 + subcc %g1, %o0, %g7
5728 +
5729 +#ifdef CONFIG_PAX_REFCOUNT
5730 + tvs %icc, 6
5731 +#endif
5732 +
5733 cas [%o1], %g1, %g7
5734 cmp %g1, %g7
5735 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5736 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5737 2: BACKOFF_SPIN(%o2, %o3, 1b)
5738 .size atomic_sub, .-atomic_sub
5739
5740 + .globl atomic_sub_unchecked
5741 + .type atomic_sub_unchecked,#function
5742 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5743 + BACKOFF_SETUP(%o2)
5744 +1: lduw [%o1], %g1
5745 + sub %g1, %o0, %g7
5746 + cas [%o1], %g1, %g7
5747 + cmp %g1, %g7
5748 + bne,pn %icc, 2f
5749 + nop
5750 + retl
5751 + nop
5752 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5753 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5754 +
5755 .globl atomic_add_ret
5756 .type atomic_add_ret,#function
5757 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5758 BACKOFF_SETUP(%o2)
5759 1: lduw [%o1], %g1
5760 - add %g1, %o0, %g7
5761 + addcc %g1, %o0, %g7
5762 +
5763 +#ifdef CONFIG_PAX_REFCOUNT
5764 + tvs %icc, 6
5765 +#endif
5766 +
5767 cas [%o1], %g1, %g7
5768 cmp %g1, %g7
5769 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5770 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5771 2: BACKOFF_SPIN(%o2, %o3, 1b)
5772 .size atomic_add_ret, .-atomic_add_ret
5773
5774 + .globl atomic_add_ret_unchecked
5775 + .type atomic_add_ret_unchecked,#function
5776 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5777 + BACKOFF_SETUP(%o2)
5778 +1: lduw [%o1], %g1
5779 + addcc %g1, %o0, %g7
5780 + cas [%o1], %g1, %g7
5781 + cmp %g1, %g7
5782 + bne,pn %icc, 2f
5783 + add %g7, %o0, %g7
5784 + sra %g7, 0, %o0
5785 + retl
5786 + nop
5787 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5788 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5789 +
5790 .globl atomic_sub_ret
5791 .type atomic_sub_ret,#function
5792 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5793 BACKOFF_SETUP(%o2)
5794 1: lduw [%o1], %g1
5795 - sub %g1, %o0, %g7
5796 + subcc %g1, %o0, %g7
5797 +
5798 +#ifdef CONFIG_PAX_REFCOUNT
5799 + tvs %icc, 6
5800 +#endif
5801 +
5802 cas [%o1], %g1, %g7
5803 cmp %g1, %g7
5804 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5805 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5806 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5807 BACKOFF_SETUP(%o2)
5808 1: ldx [%o1], %g1
5809 - add %g1, %o0, %g7
5810 + addcc %g1, %o0, %g7
5811 +
5812 +#ifdef CONFIG_PAX_REFCOUNT
5813 + tvs %xcc, 6
5814 +#endif
5815 +
5816 casx [%o1], %g1, %g7
5817 cmp %g1, %g7
5818 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5819 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5820 2: BACKOFF_SPIN(%o2, %o3, 1b)
5821 .size atomic64_add, .-atomic64_add
5822
5823 + .globl atomic64_add_unchecked
5824 + .type atomic64_add_unchecked,#function
5825 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5826 + BACKOFF_SETUP(%o2)
5827 +1: ldx [%o1], %g1
5828 + addcc %g1, %o0, %g7
5829 + casx [%o1], %g1, %g7
5830 + cmp %g1, %g7
5831 + bne,pn %xcc, 2f
5832 + nop
5833 + retl
5834 + nop
5835 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5836 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
5837 +
5838 .globl atomic64_sub
5839 .type atomic64_sub,#function
5840 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5841 BACKOFF_SETUP(%o2)
5842 1: ldx [%o1], %g1
5843 - sub %g1, %o0, %g7
5844 + subcc %g1, %o0, %g7
5845 +
5846 +#ifdef CONFIG_PAX_REFCOUNT
5847 + tvs %xcc, 6
5848 +#endif
5849 +
5850 casx [%o1], %g1, %g7
5851 cmp %g1, %g7
5852 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5853 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5854 2: BACKOFF_SPIN(%o2, %o3, 1b)
5855 .size atomic64_sub, .-atomic64_sub
5856
5857 + .globl atomic64_sub_unchecked
5858 + .type atomic64_sub_unchecked,#function
5859 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5860 + BACKOFF_SETUP(%o2)
5861 +1: ldx [%o1], %g1
5862 + subcc %g1, %o0, %g7
5863 + casx [%o1], %g1, %g7
5864 + cmp %g1, %g7
5865 + bne,pn %xcc, 2f
5866 + nop
5867 + retl
5868 + nop
5869 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5870 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5871 +
5872 .globl atomic64_add_ret
5873 .type atomic64_add_ret,#function
5874 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5875 BACKOFF_SETUP(%o2)
5876 1: ldx [%o1], %g1
5877 - add %g1, %o0, %g7
5878 + addcc %g1, %o0, %g7
5879 +
5880 +#ifdef CONFIG_PAX_REFCOUNT
5881 + tvs %xcc, 6
5882 +#endif
5883 +
5884 casx [%o1], %g1, %g7
5885 cmp %g1, %g7
5886 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5887 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5888 2: BACKOFF_SPIN(%o2, %o3, 1b)
5889 .size atomic64_add_ret, .-atomic64_add_ret
5890
5891 + .globl atomic64_add_ret_unchecked
5892 + .type atomic64_add_ret_unchecked,#function
5893 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5894 + BACKOFF_SETUP(%o2)
5895 +1: ldx [%o1], %g1
5896 + addcc %g1, %o0, %g7
5897 + casx [%o1], %g1, %g7
5898 + cmp %g1, %g7
5899 + bne,pn %xcc, 2f
5900 + add %g7, %o0, %g7
5901 + mov %g7, %o0
5902 + retl
5903 + nop
5904 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5905 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5906 +
5907 .globl atomic64_sub_ret
5908 .type atomic64_sub_ret,#function
5909 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5910 BACKOFF_SETUP(%o2)
5911 1: ldx [%o1], %g1
5912 - sub %g1, %o0, %g7
5913 + subcc %g1, %o0, %g7
5914 +
5915 +#ifdef CONFIG_PAX_REFCOUNT
5916 + tvs %xcc, 6
5917 +#endif
5918 +
5919 casx [%o1], %g1, %g7
5920 cmp %g1, %g7
5921 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5922 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5923 index 1b30bb3..b4a16c7 100644
5924 --- a/arch/sparc/lib/ksyms.c
5925 +++ b/arch/sparc/lib/ksyms.c
5926 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
5927
5928 /* Atomic counter implementation. */
5929 EXPORT_SYMBOL(atomic_add);
5930 +EXPORT_SYMBOL(atomic_add_unchecked);
5931 EXPORT_SYMBOL(atomic_add_ret);
5932 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
5933 EXPORT_SYMBOL(atomic_sub);
5934 +EXPORT_SYMBOL(atomic_sub_unchecked);
5935 EXPORT_SYMBOL(atomic_sub_ret);
5936 EXPORT_SYMBOL(atomic64_add);
5937 +EXPORT_SYMBOL(atomic64_add_unchecked);
5938 EXPORT_SYMBOL(atomic64_add_ret);
5939 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5940 EXPORT_SYMBOL(atomic64_sub);
5941 +EXPORT_SYMBOL(atomic64_sub_unchecked);
5942 EXPORT_SYMBOL(atomic64_sub_ret);
5943
5944 /* Atomic bit operations. */
5945 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5946 index 301421c..e2535d1 100644
5947 --- a/arch/sparc/mm/Makefile
5948 +++ b/arch/sparc/mm/Makefile
5949 @@ -2,7 +2,7 @@
5950 #
5951
5952 asflags-y := -ansi
5953 -ccflags-y := -Werror
5954 +#ccflags-y := -Werror
5955
5956 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
5957 obj-y += fault_$(BITS).o
5958 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5959 index 8023fd7..c8e89e9 100644
5960 --- a/arch/sparc/mm/fault_32.c
5961 +++ b/arch/sparc/mm/fault_32.c
5962 @@ -21,6 +21,9 @@
5963 #include <linux/perf_event.h>
5964 #include <linux/interrupt.h>
5965 #include <linux/kdebug.h>
5966 +#include <linux/slab.h>
5967 +#include <linux/pagemap.h>
5968 +#include <linux/compiler.h>
5969
5970 #include <asm/system.h>
5971 #include <asm/page.h>
5972 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5973 return safe_compute_effective_address(regs, insn);
5974 }
5975
5976 +#ifdef CONFIG_PAX_PAGEEXEC
5977 +#ifdef CONFIG_PAX_DLRESOLVE
5978 +static void pax_emuplt_close(struct vm_area_struct *vma)
5979 +{
5980 + vma->vm_mm->call_dl_resolve = 0UL;
5981 +}
5982 +
5983 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5984 +{
5985 + unsigned int *kaddr;
5986 +
5987 + vmf->page = alloc_page(GFP_HIGHUSER);
5988 + if (!vmf->page)
5989 + return VM_FAULT_OOM;
5990 +
5991 + kaddr = kmap(vmf->page);
5992 + memset(kaddr, 0, PAGE_SIZE);
5993 + kaddr[0] = 0x9DE3BFA8U; /* save */
5994 + flush_dcache_page(vmf->page);
5995 + kunmap(vmf->page);
5996 + return VM_FAULT_MAJOR;
5997 +}
5998 +
5999 +static const struct vm_operations_struct pax_vm_ops = {
6000 + .close = pax_emuplt_close,
6001 + .fault = pax_emuplt_fault
6002 +};
6003 +
6004 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6005 +{
6006 + int ret;
6007 +
6008 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6009 + vma->vm_mm = current->mm;
6010 + vma->vm_start = addr;
6011 + vma->vm_end = addr + PAGE_SIZE;
6012 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6013 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6014 + vma->vm_ops = &pax_vm_ops;
6015 +
6016 + ret = insert_vm_struct(current->mm, vma);
6017 + if (ret)
6018 + return ret;
6019 +
6020 + ++current->mm->total_vm;
6021 + return 0;
6022 +}
6023 +#endif
6024 +
6025 +/*
6026 + * PaX: decide what to do with offenders (regs->pc = fault address)
6027 + *
6028 + * returns 1 when task should be killed
6029 + * 2 when patched PLT trampoline was detected
6030 + * 3 when unpatched PLT trampoline was detected
6031 + */
6032 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6033 +{
6034 +
6035 +#ifdef CONFIG_PAX_EMUPLT
6036 + int err;
6037 +
6038 + do { /* PaX: patched PLT emulation #1 */
6039 + unsigned int sethi1, sethi2, jmpl;
6040 +
6041 + err = get_user(sethi1, (unsigned int *)regs->pc);
6042 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6043 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6044 +
6045 + if (err)
6046 + break;
6047 +
6048 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6049 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6050 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6051 + {
6052 + unsigned int addr;
6053 +
6054 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6055 + addr = regs->u_regs[UREG_G1];
6056 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6057 + regs->pc = addr;
6058 + regs->npc = addr+4;
6059 + return 2;
6060 + }
6061 + } while (0);
6062 +
6063 + { /* PaX: patched PLT emulation #2 */
6064 + unsigned int ba;
6065 +
6066 + err = get_user(ba, (unsigned int *)regs->pc);
6067 +
6068 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6069 + unsigned int addr;
6070 +
6071 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6072 + regs->pc = addr;
6073 + regs->npc = addr+4;
6074 + return 2;
6075 + }
6076 + }
6077 +
6078 + do { /* PaX: patched PLT emulation #3 */
6079 + unsigned int sethi, jmpl, nop;
6080 +
6081 + err = get_user(sethi, (unsigned int *)regs->pc);
6082 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6083 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6084 +
6085 + if (err)
6086 + break;
6087 +
6088 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6089 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6090 + nop == 0x01000000U)
6091 + {
6092 + unsigned int addr;
6093 +
6094 + addr = (sethi & 0x003FFFFFU) << 10;
6095 + regs->u_regs[UREG_G1] = addr;
6096 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6097 + regs->pc = addr;
6098 + regs->npc = addr+4;
6099 + return 2;
6100 + }
6101 + } while (0);
6102 +
6103 + do { /* PaX: unpatched PLT emulation step 1 */
6104 + unsigned int sethi, ba, nop;
6105 +
6106 + err = get_user(sethi, (unsigned int *)regs->pc);
6107 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6108 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6109 +
6110 + if (err)
6111 + break;
6112 +
6113 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6114 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6115 + nop == 0x01000000U)
6116 + {
6117 + unsigned int addr, save, call;
6118 +
6119 + if ((ba & 0xFFC00000U) == 0x30800000U)
6120 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6121 + else
6122 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6123 +
6124 + err = get_user(save, (unsigned int *)addr);
6125 + err |= get_user(call, (unsigned int *)(addr+4));
6126 + err |= get_user(nop, (unsigned int *)(addr+8));
6127 + if (err)
6128 + break;
6129 +
6130 +#ifdef CONFIG_PAX_DLRESOLVE
6131 + if (save == 0x9DE3BFA8U &&
6132 + (call & 0xC0000000U) == 0x40000000U &&
6133 + nop == 0x01000000U)
6134 + {
6135 + struct vm_area_struct *vma;
6136 + unsigned long call_dl_resolve;
6137 +
6138 + down_read(&current->mm->mmap_sem);
6139 + call_dl_resolve = current->mm->call_dl_resolve;
6140 + up_read(&current->mm->mmap_sem);
6141 + if (likely(call_dl_resolve))
6142 + goto emulate;
6143 +
6144 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6145 +
6146 + down_write(&current->mm->mmap_sem);
6147 + if (current->mm->call_dl_resolve) {
6148 + call_dl_resolve = current->mm->call_dl_resolve;
6149 + up_write(&current->mm->mmap_sem);
6150 + if (vma)
6151 + kmem_cache_free(vm_area_cachep, vma);
6152 + goto emulate;
6153 + }
6154 +
6155 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6156 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6157 + up_write(&current->mm->mmap_sem);
6158 + if (vma)
6159 + kmem_cache_free(vm_area_cachep, vma);
6160 + return 1;
6161 + }
6162 +
6163 + if (pax_insert_vma(vma, call_dl_resolve)) {
6164 + up_write(&current->mm->mmap_sem);
6165 + kmem_cache_free(vm_area_cachep, vma);
6166 + return 1;
6167 + }
6168 +
6169 + current->mm->call_dl_resolve = call_dl_resolve;
6170 + up_write(&current->mm->mmap_sem);
6171 +
6172 +emulate:
6173 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6174 + regs->pc = call_dl_resolve;
6175 + regs->npc = addr+4;
6176 + return 3;
6177 + }
6178 +#endif
6179 +
6180 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6181 + if ((save & 0xFFC00000U) == 0x05000000U &&
6182 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6183 + nop == 0x01000000U)
6184 + {
6185 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6186 + regs->u_regs[UREG_G2] = addr + 4;
6187 + addr = (save & 0x003FFFFFU) << 10;
6188 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6189 + regs->pc = addr;
6190 + regs->npc = addr+4;
6191 + return 3;
6192 + }
6193 + }
6194 + } while (0);
6195 +
6196 + do { /* PaX: unpatched PLT emulation step 2 */
6197 + unsigned int save, call, nop;
6198 +
6199 + err = get_user(save, (unsigned int *)(regs->pc-4));
6200 + err |= get_user(call, (unsigned int *)regs->pc);
6201 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6202 + if (err)
6203 + break;
6204 +
6205 + if (save == 0x9DE3BFA8U &&
6206 + (call & 0xC0000000U) == 0x40000000U &&
6207 + nop == 0x01000000U)
6208 + {
6209 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6210 +
6211 + regs->u_regs[UREG_RETPC] = regs->pc;
6212 + regs->pc = dl_resolve;
6213 + regs->npc = dl_resolve+4;
6214 + return 3;
6215 + }
6216 + } while (0);
6217 +#endif
6218 +
6219 + return 1;
6220 +}
6221 +
6222 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6223 +{
6224 + unsigned long i;
6225 +
6226 + printk(KERN_ERR "PAX: bytes at PC: ");
6227 + for (i = 0; i < 8; i++) {
6228 + unsigned int c;
6229 + if (get_user(c, (unsigned int *)pc+i))
6230 + printk(KERN_CONT "???????? ");
6231 + else
6232 + printk(KERN_CONT "%08x ", c);
6233 + }
6234 + printk("\n");
6235 +}
6236 +#endif
6237 +
6238 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6239 int text_fault)
6240 {
6241 @@ -280,6 +545,24 @@ good_area:
6242 if(!(vma->vm_flags & VM_WRITE))
6243 goto bad_area;
6244 } else {
6245 +
6246 +#ifdef CONFIG_PAX_PAGEEXEC
6247 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6248 + up_read(&mm->mmap_sem);
6249 + switch (pax_handle_fetch_fault(regs)) {
6250 +
6251 +#ifdef CONFIG_PAX_EMUPLT
6252 + case 2:
6253 + case 3:
6254 + return;
6255 +#endif
6256 +
6257 + }
6258 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6259 + do_group_exit(SIGKILL);
6260 + }
6261 +#endif
6262 +
6263 /* Allow reads even for write-only mappings */
6264 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6265 goto bad_area;
6266 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6267 index 504c062..6fcb9c6 100644
6268 --- a/arch/sparc/mm/fault_64.c
6269 +++ b/arch/sparc/mm/fault_64.c
6270 @@ -21,6 +21,9 @@
6271 #include <linux/kprobes.h>
6272 #include <linux/kdebug.h>
6273 #include <linux/percpu.h>
6274 +#include <linux/slab.h>
6275 +#include <linux/pagemap.h>
6276 +#include <linux/compiler.h>
6277
6278 #include <asm/page.h>
6279 #include <asm/pgtable.h>
6280 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6281 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6282 regs->tpc);
6283 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6284 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6285 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6286 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6287 dump_stack();
6288 unhandled_fault(regs->tpc, current, regs);
6289 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6290 show_regs(regs);
6291 }
6292
6293 +#ifdef CONFIG_PAX_PAGEEXEC
6294 +#ifdef CONFIG_PAX_DLRESOLVE
6295 +static void pax_emuplt_close(struct vm_area_struct *vma)
6296 +{
6297 + vma->vm_mm->call_dl_resolve = 0UL;
6298 +}
6299 +
6300 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6301 +{
6302 + unsigned int *kaddr;
6303 +
6304 + vmf->page = alloc_page(GFP_HIGHUSER);
6305 + if (!vmf->page)
6306 + return VM_FAULT_OOM;
6307 +
6308 + kaddr = kmap(vmf->page);
6309 + memset(kaddr, 0, PAGE_SIZE);
6310 + kaddr[0] = 0x9DE3BFA8U; /* save */
6311 + flush_dcache_page(vmf->page);
6312 + kunmap(vmf->page);
6313 + return VM_FAULT_MAJOR;
6314 +}
6315 +
6316 +static const struct vm_operations_struct pax_vm_ops = {
6317 + .close = pax_emuplt_close,
6318 + .fault = pax_emuplt_fault
6319 +};
6320 +
6321 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6322 +{
6323 + int ret;
6324 +
6325 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6326 + vma->vm_mm = current->mm;
6327 + vma->vm_start = addr;
6328 + vma->vm_end = addr + PAGE_SIZE;
6329 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6330 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6331 + vma->vm_ops = &pax_vm_ops;
6332 +
6333 + ret = insert_vm_struct(current->mm, vma);
6334 + if (ret)
6335 + return ret;
6336 +
6337 + ++current->mm->total_vm;
6338 + return 0;
6339 +}
6340 +#endif
6341 +
6342 +/*
6343 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6344 + *
6345 + * returns 1 when task should be killed
6346 + * 2 when patched PLT trampoline was detected
6347 + * 3 when unpatched PLT trampoline was detected
6348 + */
6349 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6350 +{
6351 +
6352 +#ifdef CONFIG_PAX_EMUPLT
6353 + int err;
6354 +
6355 + do { /* PaX: patched PLT emulation #1 */
6356 + unsigned int sethi1, sethi2, jmpl;
6357 +
6358 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6359 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6360 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6361 +
6362 + if (err)
6363 + break;
6364 +
6365 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6366 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6367 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6368 + {
6369 + unsigned long addr;
6370 +
6371 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6372 + addr = regs->u_regs[UREG_G1];
6373 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6374 +
6375 + if (test_thread_flag(TIF_32BIT))
6376 + addr &= 0xFFFFFFFFUL;
6377 +
6378 + regs->tpc = addr;
6379 + regs->tnpc = addr+4;
6380 + return 2;
6381 + }
6382 + } while (0);
6383 +
6384 + { /* PaX: patched PLT emulation #2 */
6385 + unsigned int ba;
6386 +
6387 + err = get_user(ba, (unsigned int *)regs->tpc);
6388 +
6389 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6390 + unsigned long addr;
6391 +
6392 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6393 +
6394 + if (test_thread_flag(TIF_32BIT))
6395 + addr &= 0xFFFFFFFFUL;
6396 +
6397 + regs->tpc = addr;
6398 + regs->tnpc = addr+4;
6399 + return 2;
6400 + }
6401 + }
6402 +
6403 + do { /* PaX: patched PLT emulation #3 */
6404 + unsigned int sethi, jmpl, nop;
6405 +
6406 + err = get_user(sethi, (unsigned int *)regs->tpc);
6407 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6408 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6409 +
6410 + if (err)
6411 + break;
6412 +
6413 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6414 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6415 + nop == 0x01000000U)
6416 + {
6417 + unsigned long addr;
6418 +
6419 + addr = (sethi & 0x003FFFFFU) << 10;
6420 + regs->u_regs[UREG_G1] = addr;
6421 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6422 +
6423 + if (test_thread_flag(TIF_32BIT))
6424 + addr &= 0xFFFFFFFFUL;
6425 +
6426 + regs->tpc = addr;
6427 + regs->tnpc = addr+4;
6428 + return 2;
6429 + }
6430 + } while (0);
6431 +
6432 + do { /* PaX: patched PLT emulation #4 */
6433 + unsigned int sethi, mov1, call, mov2;
6434 +
6435 + err = get_user(sethi, (unsigned int *)regs->tpc);
6436 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6437 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6438 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6439 +
6440 + if (err)
6441 + break;
6442 +
6443 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6444 + mov1 == 0x8210000FU &&
6445 + (call & 0xC0000000U) == 0x40000000U &&
6446 + mov2 == 0x9E100001U)
6447 + {
6448 + unsigned long addr;
6449 +
6450 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6451 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6452 +
6453 + if (test_thread_flag(TIF_32BIT))
6454 + addr &= 0xFFFFFFFFUL;
6455 +
6456 + regs->tpc = addr;
6457 + regs->tnpc = addr+4;
6458 + return 2;
6459 + }
6460 + } while (0);
6461 +
6462 + do { /* PaX: patched PLT emulation #5 */
6463 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6464 +
6465 + err = get_user(sethi, (unsigned int *)regs->tpc);
6466 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6467 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6468 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6469 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6470 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6471 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6472 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6473 +
6474 + if (err)
6475 + break;
6476 +
6477 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6478 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6479 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6480 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6481 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6482 + sllx == 0x83287020U &&
6483 + jmpl == 0x81C04005U &&
6484 + nop == 0x01000000U)
6485 + {
6486 + unsigned long addr;
6487 +
6488 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6489 + regs->u_regs[UREG_G1] <<= 32;
6490 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6491 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6492 + regs->tpc = addr;
6493 + regs->tnpc = addr+4;
6494 + return 2;
6495 + }
6496 + } while (0);
6497 +
6498 + do { /* PaX: patched PLT emulation #6 */
6499 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6500 +
6501 + err = get_user(sethi, (unsigned int *)regs->tpc);
6502 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6503 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6504 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6505 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6506 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6507 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6508 +
6509 + if (err)
6510 + break;
6511 +
6512 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6513 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6514 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6515 + sllx == 0x83287020U &&
6516 + (or & 0xFFFFE000U) == 0x8A116000U &&
6517 + jmpl == 0x81C04005U &&
6518 + nop == 0x01000000U)
6519 + {
6520 + unsigned long addr;
6521 +
6522 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6523 + regs->u_regs[UREG_G1] <<= 32;
6524 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6525 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6526 + regs->tpc = addr;
6527 + regs->tnpc = addr+4;
6528 + return 2;
6529 + }
6530 + } while (0);
6531 +
6532 + do { /* PaX: unpatched PLT emulation step 1 */
6533 + unsigned int sethi, ba, nop;
6534 +
6535 + err = get_user(sethi, (unsigned int *)regs->tpc);
6536 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6537 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6538 +
6539 + if (err)
6540 + break;
6541 +
6542 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6543 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6544 + nop == 0x01000000U)
6545 + {
6546 + unsigned long addr;
6547 + unsigned int save, call;
6548 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6549 +
6550 + if ((ba & 0xFFC00000U) == 0x30800000U)
6551 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6552 + else
6553 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6554 +
6555 + if (test_thread_flag(TIF_32BIT))
6556 + addr &= 0xFFFFFFFFUL;
6557 +
6558 + err = get_user(save, (unsigned int *)addr);
6559 + err |= get_user(call, (unsigned int *)(addr+4));
6560 + err |= get_user(nop, (unsigned int *)(addr+8));
6561 + if (err)
6562 + break;
6563 +
6564 +#ifdef CONFIG_PAX_DLRESOLVE
6565 + if (save == 0x9DE3BFA8U &&
6566 + (call & 0xC0000000U) == 0x40000000U &&
6567 + nop == 0x01000000U)
6568 + {
6569 + struct vm_area_struct *vma;
6570 + unsigned long call_dl_resolve;
6571 +
6572 + down_read(&current->mm->mmap_sem);
6573 + call_dl_resolve = current->mm->call_dl_resolve;
6574 + up_read(&current->mm->mmap_sem);
6575 + if (likely(call_dl_resolve))
6576 + goto emulate;
6577 +
6578 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6579 +
6580 + down_write(&current->mm->mmap_sem);
6581 + if (current->mm->call_dl_resolve) {
6582 + call_dl_resolve = current->mm->call_dl_resolve;
6583 + up_write(&current->mm->mmap_sem);
6584 + if (vma)
6585 + kmem_cache_free(vm_area_cachep, vma);
6586 + goto emulate;
6587 + }
6588 +
6589 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6590 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6591 + up_write(&current->mm->mmap_sem);
6592 + if (vma)
6593 + kmem_cache_free(vm_area_cachep, vma);
6594 + return 1;
6595 + }
6596 +
6597 + if (pax_insert_vma(vma, call_dl_resolve)) {
6598 + up_write(&current->mm->mmap_sem);
6599 + kmem_cache_free(vm_area_cachep, vma);
6600 + return 1;
6601 + }
6602 +
6603 + current->mm->call_dl_resolve = call_dl_resolve;
6604 + up_write(&current->mm->mmap_sem);
6605 +
6606 +emulate:
6607 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6608 + regs->tpc = call_dl_resolve;
6609 + regs->tnpc = addr+4;
6610 + return 3;
6611 + }
6612 +#endif
6613 +
6614 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6615 + if ((save & 0xFFC00000U) == 0x05000000U &&
6616 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6617 + nop == 0x01000000U)
6618 + {
6619 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6620 + regs->u_regs[UREG_G2] = addr + 4;
6621 + addr = (save & 0x003FFFFFU) << 10;
6622 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6623 +
6624 + if (test_thread_flag(TIF_32BIT))
6625 + addr &= 0xFFFFFFFFUL;
6626 +
6627 + regs->tpc = addr;
6628 + regs->tnpc = addr+4;
6629 + return 3;
6630 + }
6631 +
6632 + /* PaX: 64-bit PLT stub */
6633 + err = get_user(sethi1, (unsigned int *)addr);
6634 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6635 + err |= get_user(or1, (unsigned int *)(addr+8));
6636 + err |= get_user(or2, (unsigned int *)(addr+12));
6637 + err |= get_user(sllx, (unsigned int *)(addr+16));
6638 + err |= get_user(add, (unsigned int *)(addr+20));
6639 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6640 + err |= get_user(nop, (unsigned int *)(addr+28));
6641 + if (err)
6642 + break;
6643 +
6644 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6645 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6646 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6647 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6648 + sllx == 0x89293020U &&
6649 + add == 0x8A010005U &&
6650 + jmpl == 0x89C14000U &&
6651 + nop == 0x01000000U)
6652 + {
6653 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6654 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6655 + regs->u_regs[UREG_G4] <<= 32;
6656 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6657 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6658 + regs->u_regs[UREG_G4] = addr + 24;
6659 + addr = regs->u_regs[UREG_G5];
6660 + regs->tpc = addr;
6661 + regs->tnpc = addr+4;
6662 + return 3;
6663 + }
6664 + }
6665 + } while (0);
6666 +
6667 +#ifdef CONFIG_PAX_DLRESOLVE
6668 + do { /* PaX: unpatched PLT emulation step 2 */
6669 + unsigned int save, call, nop;
6670 +
6671 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6672 + err |= get_user(call, (unsigned int *)regs->tpc);
6673 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6674 + if (err)
6675 + break;
6676 +
6677 + if (save == 0x9DE3BFA8U &&
6678 + (call & 0xC0000000U) == 0x40000000U &&
6679 + nop == 0x01000000U)
6680 + {
6681 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6682 +
6683 + if (test_thread_flag(TIF_32BIT))
6684 + dl_resolve &= 0xFFFFFFFFUL;
6685 +
6686 + regs->u_regs[UREG_RETPC] = regs->tpc;
6687 + regs->tpc = dl_resolve;
6688 + regs->tnpc = dl_resolve+4;
6689 + return 3;
6690 + }
6691 + } while (0);
6692 +#endif
6693 +
6694 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6695 + unsigned int sethi, ba, nop;
6696 +
6697 + err = get_user(sethi, (unsigned int *)regs->tpc);
6698 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6699 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6700 +
6701 + if (err)
6702 + break;
6703 +
6704 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6705 + (ba & 0xFFF00000U) == 0x30600000U &&
6706 + nop == 0x01000000U)
6707 + {
6708 + unsigned long addr;
6709 +
6710 + addr = (sethi & 0x003FFFFFU) << 10;
6711 + regs->u_regs[UREG_G1] = addr;
6712 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6713 +
6714 + if (test_thread_flag(TIF_32BIT))
6715 + addr &= 0xFFFFFFFFUL;
6716 +
6717 + regs->tpc = addr;
6718 + regs->tnpc = addr+4;
6719 + return 2;
6720 + }
6721 + } while (0);
6722 +
6723 +#endif
6724 +
6725 + return 1;
6726 +}
6727 +
6728 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6729 +{
6730 + unsigned long i;
6731 +
6732 + printk(KERN_ERR "PAX: bytes at PC: ");
6733 + for (i = 0; i < 8; i++) {
6734 + unsigned int c;
6735 + if (get_user(c, (unsigned int *)pc+i))
6736 + printk(KERN_CONT "???????? ");
6737 + else
6738 + printk(KERN_CONT "%08x ", c);
6739 + }
6740 + printk("\n");
6741 +}
6742 +#endif
6743 +
6744 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6745 {
6746 struct mm_struct *mm = current->mm;
6747 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6748 if (!vma)
6749 goto bad_area;
6750
6751 +#ifdef CONFIG_PAX_PAGEEXEC
6752 + /* PaX: detect ITLB misses on non-exec pages */
6753 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6754 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6755 + {
6756 + if (address != regs->tpc)
6757 + goto good_area;
6758 +
6759 + up_read(&mm->mmap_sem);
6760 + switch (pax_handle_fetch_fault(regs)) {
6761 +
6762 +#ifdef CONFIG_PAX_EMUPLT
6763 + case 2:
6764 + case 3:
6765 + return;
6766 +#endif
6767 +
6768 + }
6769 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6770 + do_group_exit(SIGKILL);
6771 + }
6772 +#endif
6773 +
6774 /* Pure DTLB misses do not tell us whether the fault causing
6775 * load/store/atomic was a write or not, it only says that there
6776 * was no match. So in such a case we (carefully) read the
6777 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6778 index 07e1453..0a7d9e9 100644
6779 --- a/arch/sparc/mm/hugetlbpage.c
6780 +++ b/arch/sparc/mm/hugetlbpage.c
6781 @@ -67,7 +67,7 @@ full_search:
6782 }
6783 return -ENOMEM;
6784 }
6785 - if (likely(!vma || addr + len <= vma->vm_start)) {
6786 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6787 /*
6788 * Remember the place where we stopped the search:
6789 */
6790 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6791 /* make sure it can fit in the remaining address space */
6792 if (likely(addr > len)) {
6793 vma = find_vma(mm, addr-len);
6794 - if (!vma || addr <= vma->vm_start) {
6795 + if (check_heap_stack_gap(vma, addr - len, len)) {
6796 /* remember the address as a hint for next time */
6797 return (mm->free_area_cache = addr-len);
6798 }
6799 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6800 if (unlikely(mm->mmap_base < len))
6801 goto bottomup;
6802
6803 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6804 + addr = mm->mmap_base - len;
6805
6806 do {
6807 + addr &= HPAGE_MASK;
6808 /*
6809 * Lookup failure means no vma is above this address,
6810 * else if new region fits below vma->vm_start,
6811 * return with success:
6812 */
6813 vma = find_vma(mm, addr);
6814 - if (likely(!vma || addr+len <= vma->vm_start)) {
6815 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6816 /* remember the address as a hint for next time */
6817 return (mm->free_area_cache = addr);
6818 }
6819 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6820 mm->cached_hole_size = vma->vm_start - addr;
6821
6822 /* try just below the current vma->vm_start */
6823 - addr = (vma->vm_start-len) & HPAGE_MASK;
6824 - } while (likely(len < vma->vm_start));
6825 + addr = skip_heap_stack_gap(vma, len);
6826 + } while (!IS_ERR_VALUE(addr));
6827
6828 bottomup:
6829 /*
6830 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6831 if (addr) {
6832 addr = ALIGN(addr, HPAGE_SIZE);
6833 vma = find_vma(mm, addr);
6834 - if (task_size - len >= addr &&
6835 - (!vma || addr + len <= vma->vm_start))
6836 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6837 return addr;
6838 }
6839 if (mm->get_unmapped_area == arch_get_unmapped_area)
6840 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6841 index 7b00de6..78239f4 100644
6842 --- a/arch/sparc/mm/init_32.c
6843 +++ b/arch/sparc/mm/init_32.c
6844 @@ -316,6 +316,9 @@ extern void device_scan(void);
6845 pgprot_t PAGE_SHARED __read_mostly;
6846 EXPORT_SYMBOL(PAGE_SHARED);
6847
6848 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6849 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6850 +
6851 void __init paging_init(void)
6852 {
6853 switch(sparc_cpu_model) {
6854 @@ -344,17 +347,17 @@ void __init paging_init(void)
6855
6856 /* Initialize the protection map with non-constant, MMU dependent values. */
6857 protection_map[0] = PAGE_NONE;
6858 - protection_map[1] = PAGE_READONLY;
6859 - protection_map[2] = PAGE_COPY;
6860 - protection_map[3] = PAGE_COPY;
6861 + protection_map[1] = PAGE_READONLY_NOEXEC;
6862 + protection_map[2] = PAGE_COPY_NOEXEC;
6863 + protection_map[3] = PAGE_COPY_NOEXEC;
6864 protection_map[4] = PAGE_READONLY;
6865 protection_map[5] = PAGE_READONLY;
6866 protection_map[6] = PAGE_COPY;
6867 protection_map[7] = PAGE_COPY;
6868 protection_map[8] = PAGE_NONE;
6869 - protection_map[9] = PAGE_READONLY;
6870 - protection_map[10] = PAGE_SHARED;
6871 - protection_map[11] = PAGE_SHARED;
6872 + protection_map[9] = PAGE_READONLY_NOEXEC;
6873 + protection_map[10] = PAGE_SHARED_NOEXEC;
6874 + protection_map[11] = PAGE_SHARED_NOEXEC;
6875 protection_map[12] = PAGE_READONLY;
6876 protection_map[13] = PAGE_READONLY;
6877 protection_map[14] = PAGE_SHARED;
6878 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6879 index cbef74e..c38fead 100644
6880 --- a/arch/sparc/mm/srmmu.c
6881 +++ b/arch/sparc/mm/srmmu.c
6882 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6883 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6884 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6885 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6886 +
6887 +#ifdef CONFIG_PAX_PAGEEXEC
6888 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6889 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6890 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6891 +#endif
6892 +
6893 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6894 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6895
6896 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
6897 index 27fe667..36d474c 100644
6898 --- a/arch/tile/include/asm/atomic_64.h
6899 +++ b/arch/tile/include/asm/atomic_64.h
6900 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6901
6902 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6903
6904 +#define atomic64_read_unchecked(v) atomic64_read(v)
6905 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6906 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6907 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6908 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6909 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
6910 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6911 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
6912 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6913 +
6914 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
6915 #define smp_mb__before_atomic_dec() smp_mb()
6916 #define smp_mb__after_atomic_dec() smp_mb()
6917 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
6918 index 392e533..536b092 100644
6919 --- a/arch/tile/include/asm/cache.h
6920 +++ b/arch/tile/include/asm/cache.h
6921 @@ -15,11 +15,12 @@
6922 #ifndef _ASM_TILE_CACHE_H
6923 #define _ASM_TILE_CACHE_H
6924
6925 +#include <linux/const.h>
6926 #include <arch/chip.h>
6927
6928 /* bytes per L1 data cache line */
6929 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
6930 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6931 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6932
6933 /* bytes per L2 cache line */
6934 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
6935 diff --git a/arch/um/Makefile b/arch/um/Makefile
6936 index 7730af6..cce5b19 100644
6937 --- a/arch/um/Makefile
6938 +++ b/arch/um/Makefile
6939 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6940 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6941 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
6942
6943 +ifdef CONSTIFY_PLUGIN
6944 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6945 +endif
6946 +
6947 #This will adjust *FLAGS accordingly to the platform.
6948 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
6949
6950 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
6951 index 19e1bdd..3665b77 100644
6952 --- a/arch/um/include/asm/cache.h
6953 +++ b/arch/um/include/asm/cache.h
6954 @@ -1,6 +1,7 @@
6955 #ifndef __UM_CACHE_H
6956 #define __UM_CACHE_H
6957
6958 +#include <linux/const.h>
6959
6960 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
6961 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6962 @@ -12,6 +13,6 @@
6963 # define L1_CACHE_SHIFT 5
6964 #endif
6965
6966 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6967 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6968
6969 #endif
6970 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6971 index 6c03acd..a5e0215 100644
6972 --- a/arch/um/include/asm/kmap_types.h
6973 +++ b/arch/um/include/asm/kmap_types.h
6974 @@ -23,6 +23,7 @@ enum km_type {
6975 KM_IRQ1,
6976 KM_SOFTIRQ0,
6977 KM_SOFTIRQ1,
6978 + KM_CLEARPAGE,
6979 KM_TYPE_NR
6980 };
6981
6982 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6983 index 7cfc3ce..cbd1a58 100644
6984 --- a/arch/um/include/asm/page.h
6985 +++ b/arch/um/include/asm/page.h
6986 @@ -14,6 +14,9 @@
6987 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6988 #define PAGE_MASK (~(PAGE_SIZE-1))
6989
6990 +#define ktla_ktva(addr) (addr)
6991 +#define ktva_ktla(addr) (addr)
6992 +
6993 #ifndef __ASSEMBLY__
6994
6995 struct page;
6996 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6997 index c533835..84db18e 100644
6998 --- a/arch/um/kernel/process.c
6999 +++ b/arch/um/kernel/process.c
7000 @@ -406,22 +406,6 @@ int singlestepping(void * t)
7001 return 2;
7002 }
7003
7004 -/*
7005 - * Only x86 and x86_64 have an arch_align_stack().
7006 - * All other arches have "#define arch_align_stack(x) (x)"
7007 - * in their asm/system.h
7008 - * As this is included in UML from asm-um/system-generic.h,
7009 - * we can use it to behave as the subarch does.
7010 - */
7011 -#ifndef arch_align_stack
7012 -unsigned long arch_align_stack(unsigned long sp)
7013 -{
7014 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7015 - sp -= get_random_int() % 8192;
7016 - return sp & ~0xf;
7017 -}
7018 -#endif
7019 -
7020 unsigned long get_wchan(struct task_struct *p)
7021 {
7022 unsigned long stack_page, sp, ip;
7023 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7024 index ad8f795..2c7eec6 100644
7025 --- a/arch/unicore32/include/asm/cache.h
7026 +++ b/arch/unicore32/include/asm/cache.h
7027 @@ -12,8 +12,10 @@
7028 #ifndef __UNICORE_CACHE_H__
7029 #define __UNICORE_CACHE_H__
7030
7031 -#define L1_CACHE_SHIFT (5)
7032 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7033 +#include <linux/const.h>
7034 +
7035 +#define L1_CACHE_SHIFT 5
7036 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7037
7038 /*
7039 * Memory returned by kmalloc() may be used for DMA, so we must make
7040 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7041 index efb4294..61bc18c 100644
7042 --- a/arch/x86/Kconfig
7043 +++ b/arch/x86/Kconfig
7044 @@ -235,7 +235,7 @@ config X86_HT
7045
7046 config X86_32_LAZY_GS
7047 def_bool y
7048 - depends on X86_32 && !CC_STACKPROTECTOR
7049 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7050
7051 config ARCH_HWEIGHT_CFLAGS
7052 string
7053 @@ -1022,7 +1022,7 @@ choice
7054
7055 config NOHIGHMEM
7056 bool "off"
7057 - depends on !X86_NUMAQ
7058 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7059 ---help---
7060 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7061 However, the address space of 32-bit x86 processors is only 4
7062 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
7063
7064 config HIGHMEM4G
7065 bool "4GB"
7066 - depends on !X86_NUMAQ
7067 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7068 ---help---
7069 Select this if you have a 32-bit processor and between 1 and 4
7070 gigabytes of physical RAM.
7071 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
7072 hex
7073 default 0xB0000000 if VMSPLIT_3G_OPT
7074 default 0x80000000 if VMSPLIT_2G
7075 - default 0x78000000 if VMSPLIT_2G_OPT
7076 + default 0x70000000 if VMSPLIT_2G_OPT
7077 default 0x40000000 if VMSPLIT_1G
7078 default 0xC0000000
7079 depends on X86_32
7080 @@ -1496,6 +1496,7 @@ config SECCOMP
7081
7082 config CC_STACKPROTECTOR
7083 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7084 + depends on X86_64 || !PAX_MEMORY_UDEREF
7085 ---help---
7086 This option turns on the -fstack-protector GCC feature. This
7087 feature puts, at the beginning of functions, a canary value on
7088 @@ -1553,6 +1554,7 @@ config KEXEC_JUMP
7089 config PHYSICAL_START
7090 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7091 default "0x1000000"
7092 + range 0x400000 0x40000000
7093 ---help---
7094 This gives the physical address where the kernel is loaded.
7095
7096 @@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
7097 config PHYSICAL_ALIGN
7098 hex "Alignment value to which kernel should be aligned" if X86_32
7099 default "0x1000000"
7100 + range 0x400000 0x1000000 if PAX_KERNEXEC
7101 range 0x2000 0x1000000
7102 ---help---
7103 This value puts the alignment restrictions on physical address
7104 @@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
7105 Say N if you want to disable CPU hotplug.
7106
7107 config COMPAT_VDSO
7108 - def_bool y
7109 + def_bool n
7110 prompt "Compat VDSO support"
7111 depends on X86_32 || IA32_EMULATION
7112 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7113 ---help---
7114 Map the 32-bit VDSO to the predictable old-style address too.
7115
7116 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7117 index e3ca7e0..b30b28a 100644
7118 --- a/arch/x86/Kconfig.cpu
7119 +++ b/arch/x86/Kconfig.cpu
7120 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
7121
7122 config X86_F00F_BUG
7123 def_bool y
7124 - depends on M586MMX || M586TSC || M586 || M486 || M386
7125 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7126
7127 config X86_INVD_BUG
7128 def_bool y
7129 @@ -365,7 +365,7 @@ config X86_POPAD_OK
7130
7131 config X86_ALIGNMENT_16
7132 def_bool y
7133 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7134 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7135
7136 config X86_INTEL_USERCOPY
7137 def_bool y
7138 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
7139 # generates cmov.
7140 config X86_CMOV
7141 def_bool y
7142 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7143 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7144
7145 config X86_MINIMUM_CPU_FAMILY
7146 int
7147 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7148 index bf56e17..05f9891 100644
7149 --- a/arch/x86/Kconfig.debug
7150 +++ b/arch/x86/Kconfig.debug
7151 @@ -81,7 +81,7 @@ config X86_PTDUMP
7152 config DEBUG_RODATA
7153 bool "Write protect kernel read-only data structures"
7154 default y
7155 - depends on DEBUG_KERNEL
7156 + depends on DEBUG_KERNEL && BROKEN
7157 ---help---
7158 Mark the kernel read-only data as write-protected in the pagetables,
7159 in order to catch accidental (and incorrect) writes to such const
7160 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
7161
7162 config DEBUG_SET_MODULE_RONX
7163 bool "Set loadable kernel module data as NX and text as RO"
7164 - depends on MODULES
7165 + depends on MODULES && BROKEN
7166 ---help---
7167 This option helps catch unintended modifications to loadable
7168 kernel module's text and read-only data. It also prevents execution
7169 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7170 index b02e509..2631e48 100644
7171 --- a/arch/x86/Makefile
7172 +++ b/arch/x86/Makefile
7173 @@ -46,6 +46,7 @@ else
7174 UTS_MACHINE := x86_64
7175 CHECKFLAGS += -D__x86_64__ -m64
7176
7177 + biarch := $(call cc-option,-m64)
7178 KBUILD_AFLAGS += -m64
7179 KBUILD_CFLAGS += -m64
7180
7181 @@ -195,3 +196,12 @@ define archhelp
7182 echo ' FDARGS="..." arguments for the booted kernel'
7183 echo ' FDINITRD=file initrd for the booted kernel'
7184 endef
7185 +
7186 +define OLD_LD
7187 +
7188 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7189 +*** Please upgrade your binutils to 2.18 or newer
7190 +endef
7191 +
7192 +archprepare:
7193 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7194 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7195 index 95365a8..52f857b 100644
7196 --- a/arch/x86/boot/Makefile
7197 +++ b/arch/x86/boot/Makefile
7198 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7199 $(call cc-option, -fno-stack-protector) \
7200 $(call cc-option, -mpreferred-stack-boundary=2)
7201 KBUILD_CFLAGS += $(call cc-option, -m32)
7202 +ifdef CONSTIFY_PLUGIN
7203 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7204 +endif
7205 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7206 GCOV_PROFILE := n
7207
7208 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7209 index 878e4b9..20537ab 100644
7210 --- a/arch/x86/boot/bitops.h
7211 +++ b/arch/x86/boot/bitops.h
7212 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7213 u8 v;
7214 const u32 *p = (const u32 *)addr;
7215
7216 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7217 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7218 return v;
7219 }
7220
7221 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7222
7223 static inline void set_bit(int nr, void *addr)
7224 {
7225 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7226 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7227 }
7228
7229 #endif /* BOOT_BITOPS_H */
7230 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7231 index c7093bd..d4247ffe0 100644
7232 --- a/arch/x86/boot/boot.h
7233 +++ b/arch/x86/boot/boot.h
7234 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7235 static inline u16 ds(void)
7236 {
7237 u16 seg;
7238 - asm("movw %%ds,%0" : "=rm" (seg));
7239 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7240 return seg;
7241 }
7242
7243 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7244 static inline int memcmp(const void *s1, const void *s2, size_t len)
7245 {
7246 u8 diff;
7247 - asm("repe; cmpsb; setnz %0"
7248 + asm volatile("repe; cmpsb; setnz %0"
7249 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7250 return diff;
7251 }
7252 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7253 index 09664ef..edc5d03 100644
7254 --- a/arch/x86/boot/compressed/Makefile
7255 +++ b/arch/x86/boot/compressed/Makefile
7256 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7257 KBUILD_CFLAGS += $(cflags-y)
7258 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7259 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7260 +ifdef CONSTIFY_PLUGIN
7261 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7262 +endif
7263
7264 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7265 GCOV_PROFILE := n
7266 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7267 index 67a655a..b924059 100644
7268 --- a/arch/x86/boot/compressed/head_32.S
7269 +++ b/arch/x86/boot/compressed/head_32.S
7270 @@ -76,7 +76,7 @@ ENTRY(startup_32)
7271 notl %eax
7272 andl %eax, %ebx
7273 #else
7274 - movl $LOAD_PHYSICAL_ADDR, %ebx
7275 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7276 #endif
7277
7278 /* Target address to relocate to for decompression */
7279 @@ -162,7 +162,7 @@ relocated:
7280 * and where it was actually loaded.
7281 */
7282 movl %ebp, %ebx
7283 - subl $LOAD_PHYSICAL_ADDR, %ebx
7284 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7285 jz 2f /* Nothing to be done if loaded at compiled addr. */
7286 /*
7287 * Process relocations.
7288 @@ -170,8 +170,7 @@ relocated:
7289
7290 1: subl $4, %edi
7291 movl (%edi), %ecx
7292 - testl %ecx, %ecx
7293 - jz 2f
7294 + jecxz 2f
7295 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7296 jmp 1b
7297 2:
7298 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7299 index 35af09d..99c9676 100644
7300 --- a/arch/x86/boot/compressed/head_64.S
7301 +++ b/arch/x86/boot/compressed/head_64.S
7302 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7303 notl %eax
7304 andl %eax, %ebx
7305 #else
7306 - movl $LOAD_PHYSICAL_ADDR, %ebx
7307 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7308 #endif
7309
7310 /* Target address to relocate to for decompression */
7311 @@ -233,7 +233,7 @@ ENTRY(startup_64)
7312 notq %rax
7313 andq %rax, %rbp
7314 #else
7315 - movq $LOAD_PHYSICAL_ADDR, %rbp
7316 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7317 #endif
7318
7319 /* Target address to relocate to for decompression */
7320 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7321 index 3a19d04..7c1d55a 100644
7322 --- a/arch/x86/boot/compressed/misc.c
7323 +++ b/arch/x86/boot/compressed/misc.c
7324 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7325 case PT_LOAD:
7326 #ifdef CONFIG_RELOCATABLE
7327 dest = output;
7328 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7329 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7330 #else
7331 dest = (void *)(phdr->p_paddr);
7332 #endif
7333 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7334 error("Destination address too large");
7335 #endif
7336 #ifndef CONFIG_RELOCATABLE
7337 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7338 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7339 error("Wrong destination address");
7340 #endif
7341
7342 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7343 index 89bbf4e..869908e 100644
7344 --- a/arch/x86/boot/compressed/relocs.c
7345 +++ b/arch/x86/boot/compressed/relocs.c
7346 @@ -13,8 +13,11 @@
7347
7348 static void die(char *fmt, ...);
7349
7350 +#include "../../../../include/generated/autoconf.h"
7351 +
7352 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7353 static Elf32_Ehdr ehdr;
7354 +static Elf32_Phdr *phdr;
7355 static unsigned long reloc_count, reloc_idx;
7356 static unsigned long *relocs;
7357
7358 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7359 }
7360 }
7361
7362 +static void read_phdrs(FILE *fp)
7363 +{
7364 + unsigned int i;
7365 +
7366 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7367 + if (!phdr) {
7368 + die("Unable to allocate %d program headers\n",
7369 + ehdr.e_phnum);
7370 + }
7371 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7372 + die("Seek to %d failed: %s\n",
7373 + ehdr.e_phoff, strerror(errno));
7374 + }
7375 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7376 + die("Cannot read ELF program headers: %s\n",
7377 + strerror(errno));
7378 + }
7379 + for(i = 0; i < ehdr.e_phnum; i++) {
7380 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7381 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7382 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7383 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7384 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7385 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7386 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7387 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7388 + }
7389 +
7390 +}
7391 +
7392 static void read_shdrs(FILE *fp)
7393 {
7394 - int i;
7395 + unsigned int i;
7396 Elf32_Shdr shdr;
7397
7398 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7399 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7400
7401 static void read_strtabs(FILE *fp)
7402 {
7403 - int i;
7404 + unsigned int i;
7405 for (i = 0; i < ehdr.e_shnum; i++) {
7406 struct section *sec = &secs[i];
7407 if (sec->shdr.sh_type != SHT_STRTAB) {
7408 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7409
7410 static void read_symtabs(FILE *fp)
7411 {
7412 - int i,j;
7413 + unsigned int i,j;
7414 for (i = 0; i < ehdr.e_shnum; i++) {
7415 struct section *sec = &secs[i];
7416 if (sec->shdr.sh_type != SHT_SYMTAB) {
7417 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7418
7419 static void read_relocs(FILE *fp)
7420 {
7421 - int i,j;
7422 + unsigned int i,j;
7423 + uint32_t base;
7424 +
7425 for (i = 0; i < ehdr.e_shnum; i++) {
7426 struct section *sec = &secs[i];
7427 if (sec->shdr.sh_type != SHT_REL) {
7428 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7429 die("Cannot read symbol table: %s\n",
7430 strerror(errno));
7431 }
7432 + base = 0;
7433 + for (j = 0; j < ehdr.e_phnum; j++) {
7434 + if (phdr[j].p_type != PT_LOAD )
7435 + continue;
7436 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7437 + continue;
7438 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7439 + break;
7440 + }
7441 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7442 Elf32_Rel *rel = &sec->reltab[j];
7443 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7444 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7445 rel->r_info = elf32_to_cpu(rel->r_info);
7446 }
7447 }
7448 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
7449
7450 static void print_absolute_symbols(void)
7451 {
7452 - int i;
7453 + unsigned int i;
7454 printf("Absolute symbols\n");
7455 printf(" Num: Value Size Type Bind Visibility Name\n");
7456 for (i = 0; i < ehdr.e_shnum; i++) {
7457 struct section *sec = &secs[i];
7458 char *sym_strtab;
7459 Elf32_Sym *sh_symtab;
7460 - int j;
7461 + unsigned int j;
7462
7463 if (sec->shdr.sh_type != SHT_SYMTAB) {
7464 continue;
7465 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
7466
7467 static void print_absolute_relocs(void)
7468 {
7469 - int i, printed = 0;
7470 + unsigned int i, printed = 0;
7471
7472 for (i = 0; i < ehdr.e_shnum; i++) {
7473 struct section *sec = &secs[i];
7474 struct section *sec_applies, *sec_symtab;
7475 char *sym_strtab;
7476 Elf32_Sym *sh_symtab;
7477 - int j;
7478 + unsigned int j;
7479 if (sec->shdr.sh_type != SHT_REL) {
7480 continue;
7481 }
7482 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
7483
7484 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7485 {
7486 - int i;
7487 + unsigned int i;
7488 /* Walk through the relocations */
7489 for (i = 0; i < ehdr.e_shnum; i++) {
7490 char *sym_strtab;
7491 Elf32_Sym *sh_symtab;
7492 struct section *sec_applies, *sec_symtab;
7493 - int j;
7494 + unsigned int j;
7495 struct section *sec = &secs[i];
7496
7497 if (sec->shdr.sh_type != SHT_REL) {
7498 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7499 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7500 continue;
7501 }
7502 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7503 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7504 + continue;
7505 +
7506 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7507 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7508 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7509 + continue;
7510 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7511 + continue;
7512 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7513 + continue;
7514 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7515 + continue;
7516 +#endif
7517 +
7518 switch (r_type) {
7519 case R_386_NONE:
7520 case R_386_PC32:
7521 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
7522
7523 static void emit_relocs(int as_text)
7524 {
7525 - int i;
7526 + unsigned int i;
7527 /* Count how many relocations I have and allocate space for them. */
7528 reloc_count = 0;
7529 walk_relocs(count_reloc);
7530 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
7531 fname, strerror(errno));
7532 }
7533 read_ehdr(fp);
7534 + read_phdrs(fp);
7535 read_shdrs(fp);
7536 read_strtabs(fp);
7537 read_symtabs(fp);
7538 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7539 index 4d3ff03..e4972ff 100644
7540 --- a/arch/x86/boot/cpucheck.c
7541 +++ b/arch/x86/boot/cpucheck.c
7542 @@ -74,7 +74,7 @@ static int has_fpu(void)
7543 u16 fcw = -1, fsw = -1;
7544 u32 cr0;
7545
7546 - asm("movl %%cr0,%0" : "=r" (cr0));
7547 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7548 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7549 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7550 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7551 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7552 {
7553 u32 f0, f1;
7554
7555 - asm("pushfl ; "
7556 + asm volatile("pushfl ; "
7557 "pushfl ; "
7558 "popl %0 ; "
7559 "movl %0,%1 ; "
7560 @@ -115,7 +115,7 @@ static void get_flags(void)
7561 set_bit(X86_FEATURE_FPU, cpu.flags);
7562
7563 if (has_eflag(X86_EFLAGS_ID)) {
7564 - asm("cpuid"
7565 + asm volatile("cpuid"
7566 : "=a" (max_intel_level),
7567 "=b" (cpu_vendor[0]),
7568 "=d" (cpu_vendor[1]),
7569 @@ -124,7 +124,7 @@ static void get_flags(void)
7570
7571 if (max_intel_level >= 0x00000001 &&
7572 max_intel_level <= 0x0000ffff) {
7573 - asm("cpuid"
7574 + asm volatile("cpuid"
7575 : "=a" (tfms),
7576 "=c" (cpu.flags[4]),
7577 "=d" (cpu.flags[0])
7578 @@ -136,7 +136,7 @@ static void get_flags(void)
7579 cpu.model += ((tfms >> 16) & 0xf) << 4;
7580 }
7581
7582 - asm("cpuid"
7583 + asm volatile("cpuid"
7584 : "=a" (max_amd_level)
7585 : "a" (0x80000000)
7586 : "ebx", "ecx", "edx");
7587 @@ -144,7 +144,7 @@ static void get_flags(void)
7588 if (max_amd_level >= 0x80000001 &&
7589 max_amd_level <= 0x8000ffff) {
7590 u32 eax = 0x80000001;
7591 - asm("cpuid"
7592 + asm volatile("cpuid"
7593 : "+a" (eax),
7594 "=c" (cpu.flags[6]),
7595 "=d" (cpu.flags[1])
7596 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7597 u32 ecx = MSR_K7_HWCR;
7598 u32 eax, edx;
7599
7600 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7601 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7602 eax &= ~(1 << 15);
7603 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7604 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7605
7606 get_flags(); /* Make sure it really did something */
7607 err = check_flags();
7608 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7609 u32 ecx = MSR_VIA_FCR;
7610 u32 eax, edx;
7611
7612 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7613 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7614 eax |= (1<<1)|(1<<7);
7615 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7616 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7617
7618 set_bit(X86_FEATURE_CX8, cpu.flags);
7619 err = check_flags();
7620 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7621 u32 eax, edx;
7622 u32 level = 1;
7623
7624 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7625 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7626 - asm("cpuid"
7627 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7628 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7629 + asm volatile("cpuid"
7630 : "+a" (level), "=d" (cpu.flags[0])
7631 : : "ecx", "ebx");
7632 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7633 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7634
7635 err = check_flags();
7636 }
7637 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7638 index bdb4d45..0476680 100644
7639 --- a/arch/x86/boot/header.S
7640 +++ b/arch/x86/boot/header.S
7641 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7642 # single linked list of
7643 # struct setup_data
7644
7645 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7646 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7647
7648 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7649 #define VO_INIT_SIZE (VO__end - VO__text)
7650 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7651 index db75d07..8e6d0af 100644
7652 --- a/arch/x86/boot/memory.c
7653 +++ b/arch/x86/boot/memory.c
7654 @@ -19,7 +19,7 @@
7655
7656 static int detect_memory_e820(void)
7657 {
7658 - int count = 0;
7659 + unsigned int count = 0;
7660 struct biosregs ireg, oreg;
7661 struct e820entry *desc = boot_params.e820_map;
7662 static struct e820entry buf; /* static so it is zeroed */
7663 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7664 index 11e8c6e..fdbb1ed 100644
7665 --- a/arch/x86/boot/video-vesa.c
7666 +++ b/arch/x86/boot/video-vesa.c
7667 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7668
7669 boot_params.screen_info.vesapm_seg = oreg.es;
7670 boot_params.screen_info.vesapm_off = oreg.di;
7671 + boot_params.screen_info.vesapm_size = oreg.cx;
7672 }
7673
7674 /*
7675 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7676 index 43eda28..5ab5fdb 100644
7677 --- a/arch/x86/boot/video.c
7678 +++ b/arch/x86/boot/video.c
7679 @@ -96,7 +96,7 @@ static void store_mode_params(void)
7680 static unsigned int get_entry(void)
7681 {
7682 char entry_buf[4];
7683 - int i, len = 0;
7684 + unsigned int i, len = 0;
7685 int key;
7686 unsigned int v;
7687
7688 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7689 index 5b577d5..3c1fed4 100644
7690 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7691 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7692 @@ -8,6 +8,8 @@
7693 * including this sentence is retained in full.
7694 */
7695
7696 +#include <asm/alternative-asm.h>
7697 +
7698 .extern crypto_ft_tab
7699 .extern crypto_it_tab
7700 .extern crypto_fl_tab
7701 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7702 je B192; \
7703 leaq 32(r9),r9;
7704
7705 +#define ret pax_force_retaddr 0, 1; ret
7706 +
7707 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7708 movq r1,r2; \
7709 movq r3,r4; \
7710 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7711 index be6d9e3..21fbbca 100644
7712 --- a/arch/x86/crypto/aesni-intel_asm.S
7713 +++ b/arch/x86/crypto/aesni-intel_asm.S
7714 @@ -31,6 +31,7 @@
7715
7716 #include <linux/linkage.h>
7717 #include <asm/inst.h>
7718 +#include <asm/alternative-asm.h>
7719
7720 #ifdef __x86_64__
7721 .data
7722 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
7723 pop %r14
7724 pop %r13
7725 pop %r12
7726 + pax_force_retaddr 0, 1
7727 ret
7728 +ENDPROC(aesni_gcm_dec)
7729
7730
7731 /*****************************************************************************
7732 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
7733 pop %r14
7734 pop %r13
7735 pop %r12
7736 + pax_force_retaddr 0, 1
7737 ret
7738 +ENDPROC(aesni_gcm_enc)
7739
7740 #endif
7741
7742 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
7743 pxor %xmm1, %xmm0
7744 movaps %xmm0, (TKEYP)
7745 add $0x10, TKEYP
7746 + pax_force_retaddr_bts
7747 ret
7748
7749 .align 4
7750 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
7751 shufps $0b01001110, %xmm2, %xmm1
7752 movaps %xmm1, 0x10(TKEYP)
7753 add $0x20, TKEYP
7754 + pax_force_retaddr_bts
7755 ret
7756
7757 .align 4
7758 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
7759
7760 movaps %xmm0, (TKEYP)
7761 add $0x10, TKEYP
7762 + pax_force_retaddr_bts
7763 ret
7764
7765 .align 4
7766 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
7767 pxor %xmm1, %xmm2
7768 movaps %xmm2, (TKEYP)
7769 add $0x10, TKEYP
7770 + pax_force_retaddr_bts
7771 ret
7772
7773 /*
7774 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
7775 #ifndef __x86_64__
7776 popl KEYP
7777 #endif
7778 + pax_force_retaddr 0, 1
7779 ret
7780 +ENDPROC(aesni_set_key)
7781
7782 /*
7783 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7784 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
7785 popl KLEN
7786 popl KEYP
7787 #endif
7788 + pax_force_retaddr 0, 1
7789 ret
7790 +ENDPROC(aesni_enc)
7791
7792 /*
7793 * _aesni_enc1: internal ABI
7794 @@ -1959,6 +1972,7 @@ _aesni_enc1:
7795 AESENC KEY STATE
7796 movaps 0x70(TKEYP), KEY
7797 AESENCLAST KEY STATE
7798 + pax_force_retaddr_bts
7799 ret
7800
7801 /*
7802 @@ -2067,6 +2081,7 @@ _aesni_enc4:
7803 AESENCLAST KEY STATE2
7804 AESENCLAST KEY STATE3
7805 AESENCLAST KEY STATE4
7806 + pax_force_retaddr_bts
7807 ret
7808
7809 /*
7810 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
7811 popl KLEN
7812 popl KEYP
7813 #endif
7814 + pax_force_retaddr 0, 1
7815 ret
7816 +ENDPROC(aesni_dec)
7817
7818 /*
7819 * _aesni_dec1: internal ABI
7820 @@ -2146,6 +2163,7 @@ _aesni_dec1:
7821 AESDEC KEY STATE
7822 movaps 0x70(TKEYP), KEY
7823 AESDECLAST KEY STATE
7824 + pax_force_retaddr_bts
7825 ret
7826
7827 /*
7828 @@ -2254,6 +2272,7 @@ _aesni_dec4:
7829 AESDECLAST KEY STATE2
7830 AESDECLAST KEY STATE3
7831 AESDECLAST KEY STATE4
7832 + pax_force_retaddr_bts
7833 ret
7834
7835 /*
7836 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
7837 popl KEYP
7838 popl LEN
7839 #endif
7840 + pax_force_retaddr 0, 1
7841 ret
7842 +ENDPROC(aesni_ecb_enc)
7843
7844 /*
7845 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7846 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
7847 popl KEYP
7848 popl LEN
7849 #endif
7850 + pax_force_retaddr 0, 1
7851 ret
7852 +ENDPROC(aesni_ecb_dec)
7853
7854 /*
7855 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7856 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
7857 popl LEN
7858 popl IVP
7859 #endif
7860 + pax_force_retaddr 0, 1
7861 ret
7862 +ENDPROC(aesni_cbc_enc)
7863
7864 /*
7865 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7866 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
7867 popl LEN
7868 popl IVP
7869 #endif
7870 + pax_force_retaddr 0, 1
7871 ret
7872 +ENDPROC(aesni_cbc_dec)
7873
7874 #ifdef __x86_64__
7875 .align 16
7876 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
7877 mov $1, TCTR_LOW
7878 MOVQ_R64_XMM TCTR_LOW INC
7879 MOVQ_R64_XMM CTR TCTR_LOW
7880 + pax_force_retaddr_bts
7881 ret
7882
7883 /*
7884 @@ -2552,6 +2580,7 @@ _aesni_inc:
7885 .Linc_low:
7886 movaps CTR, IV
7887 PSHUFB_XMM BSWAP_MASK IV
7888 + pax_force_retaddr_bts
7889 ret
7890
7891 /*
7892 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
7893 .Lctr_enc_ret:
7894 movups IV, (IVP)
7895 .Lctr_enc_just_ret:
7896 + pax_force_retaddr 0, 1
7897 ret
7898 +ENDPROC(aesni_ctr_enc)
7899 #endif
7900 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7901 index 391d245..67f35c2 100644
7902 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
7903 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7904 @@ -20,6 +20,8 @@
7905 *
7906 */
7907
7908 +#include <asm/alternative-asm.h>
7909 +
7910 .file "blowfish-x86_64-asm.S"
7911 .text
7912
7913 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
7914 jnz __enc_xor;
7915
7916 write_block();
7917 + pax_force_retaddr 0, 1
7918 ret;
7919 __enc_xor:
7920 xor_block();
7921 + pax_force_retaddr 0, 1
7922 ret;
7923
7924 .align 8
7925 @@ -188,6 +192,7 @@ blowfish_dec_blk:
7926
7927 movq %r11, %rbp;
7928
7929 + pax_force_retaddr 0, 1
7930 ret;
7931
7932 /**********************************************************************
7933 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
7934
7935 popq %rbx;
7936 popq %rbp;
7937 + pax_force_retaddr 0, 1
7938 ret;
7939
7940 __enc_xor4:
7941 @@ -349,6 +355,7 @@ __enc_xor4:
7942
7943 popq %rbx;
7944 popq %rbp;
7945 + pax_force_retaddr 0, 1
7946 ret;
7947
7948 .align 8
7949 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
7950 popq %rbx;
7951 popq %rbp;
7952
7953 + pax_force_retaddr 0, 1
7954 ret;
7955
7956 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7957 index 6214a9b..1f4fc9a 100644
7958 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7959 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7960 @@ -1,3 +1,5 @@
7961 +#include <asm/alternative-asm.h>
7962 +
7963 # enter ECRYPT_encrypt_bytes
7964 .text
7965 .p2align 5
7966 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7967 add %r11,%rsp
7968 mov %rdi,%rax
7969 mov %rsi,%rdx
7970 + pax_force_retaddr 0, 1
7971 ret
7972 # bytesatleast65:
7973 ._bytesatleast65:
7974 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
7975 add %r11,%rsp
7976 mov %rdi,%rax
7977 mov %rsi,%rdx
7978 + pax_force_retaddr
7979 ret
7980 # enter ECRYPT_ivsetup
7981 .text
7982 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7983 add %r11,%rsp
7984 mov %rdi,%rax
7985 mov %rsi,%rdx
7986 + pax_force_retaddr
7987 ret
7988 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
7989 index b2c2f57..8470cab 100644
7990 --- a/arch/x86/crypto/sha1_ssse3_asm.S
7991 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
7992 @@ -28,6 +28,8 @@
7993 * (at your option) any later version.
7994 */
7995
7996 +#include <asm/alternative-asm.h>
7997 +
7998 #define CTX %rdi // arg1
7999 #define BUF %rsi // arg2
8000 #define CNT %rdx // arg3
8001 @@ -104,6 +106,7 @@
8002 pop %r12
8003 pop %rbp
8004 pop %rbx
8005 + pax_force_retaddr 0, 1
8006 ret
8007
8008 .size \name, .-\name
8009 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8010 index 5b012a2..36d5364 100644
8011 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8012 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8013 @@ -20,6 +20,8 @@
8014 *
8015 */
8016
8017 +#include <asm/alternative-asm.h>
8018 +
8019 .file "twofish-x86_64-asm-3way.S"
8020 .text
8021
8022 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8023 popq %r13;
8024 popq %r14;
8025 popq %r15;
8026 + pax_force_retaddr 0, 1
8027 ret;
8028
8029 __enc_xor3:
8030 @@ -271,6 +274,7 @@ __enc_xor3:
8031 popq %r13;
8032 popq %r14;
8033 popq %r15;
8034 + pax_force_retaddr 0, 1
8035 ret;
8036
8037 .global twofish_dec_blk_3way
8038 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8039 popq %r13;
8040 popq %r14;
8041 popq %r15;
8042 + pax_force_retaddr 0, 1
8043 ret;
8044
8045 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8046 index 7bcf3fc..f53832f 100644
8047 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8048 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8049 @@ -21,6 +21,7 @@
8050 .text
8051
8052 #include <asm/asm-offsets.h>
8053 +#include <asm/alternative-asm.h>
8054
8055 #define a_offset 0
8056 #define b_offset 4
8057 @@ -268,6 +269,7 @@ twofish_enc_blk:
8058
8059 popq R1
8060 movq $1,%rax
8061 + pax_force_retaddr 0, 1
8062 ret
8063
8064 twofish_dec_blk:
8065 @@ -319,4 +321,5 @@ twofish_dec_blk:
8066
8067 popq R1
8068 movq $1,%rax
8069 + pax_force_retaddr 0, 1
8070 ret
8071 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8072 index fd84387..887aa7e 100644
8073 --- a/arch/x86/ia32/ia32_aout.c
8074 +++ b/arch/x86/ia32/ia32_aout.c
8075 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8076 unsigned long dump_start, dump_size;
8077 struct user32 dump;
8078
8079 + memset(&dump, 0, sizeof(dump));
8080 +
8081 fs = get_fs();
8082 set_fs(KERNEL_DS);
8083 has_dumped = 1;
8084 @@ -315,6 +317,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8085 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
8086 current->mm->cached_hole_size = 0;
8087
8088 + retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8089 + if (retval < 0) {
8090 + /* Someone check-me: is this error path enough? */
8091 + send_sig(SIGKILL, current, 0);
8092 + return retval;
8093 + }
8094 +
8095 install_exec_creds(bprm);
8096 current->flags &= ~PF_FORKNOEXEC;
8097
8098 @@ -410,13 +419,6 @@ beyond_if:
8099
8100 set_brk(current->mm->start_brk, current->mm->brk);
8101
8102 - retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8103 - if (retval < 0) {
8104 - /* Someone check-me: is this error path enough? */
8105 - send_sig(SIGKILL, current, 0);
8106 - return retval;
8107 - }
8108 -
8109 current->mm->start_stack =
8110 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8111 /* start thread */
8112 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8113 index 6557769..ef6ae89 100644
8114 --- a/arch/x86/ia32/ia32_signal.c
8115 +++ b/arch/x86/ia32/ia32_signal.c
8116 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8117 }
8118 seg = get_fs();
8119 set_fs(KERNEL_DS);
8120 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8121 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8122 set_fs(seg);
8123 if (ret >= 0 && uoss_ptr) {
8124 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8125 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8126 */
8127 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8128 size_t frame_size,
8129 - void **fpstate)
8130 + void __user **fpstate)
8131 {
8132 unsigned long sp;
8133
8134 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8135
8136 if (used_math()) {
8137 sp = sp - sig_xstate_ia32_size;
8138 - *fpstate = (struct _fpstate_ia32 *) sp;
8139 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8140 if (save_i387_xstate_ia32(*fpstate) < 0)
8141 return (void __user *) -1L;
8142 }
8143 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8144 sp -= frame_size;
8145 /* Align the stack pointer according to the i386 ABI,
8146 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8147 - sp = ((sp + 4) & -16ul) - 4;
8148 + sp = ((sp - 12) & -16ul) - 4;
8149 return (void __user *) sp;
8150 }
8151
8152 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8153 * These are actually not used anymore, but left because some
8154 * gdb versions depend on them as a marker.
8155 */
8156 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8157 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8158 } put_user_catch(err);
8159
8160 if (err)
8161 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8162 0xb8,
8163 __NR_ia32_rt_sigreturn,
8164 0x80cd,
8165 - 0,
8166 + 0
8167 };
8168
8169 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8170 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8171
8172 if (ka->sa.sa_flags & SA_RESTORER)
8173 restorer = ka->sa.sa_restorer;
8174 + else if (current->mm->context.vdso)
8175 + /* Return stub is in 32bit vsyscall page */
8176 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8177 else
8178 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8179 - rt_sigreturn);
8180 + restorer = &frame->retcode;
8181 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8182
8183 /*
8184 * Not actually used anymore, but left because some gdb
8185 * versions need it.
8186 */
8187 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8188 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8189 } put_user_catch(err);
8190
8191 if (err)
8192 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8193 index a6253ec..4ad2120 100644
8194 --- a/arch/x86/ia32/ia32entry.S
8195 +++ b/arch/x86/ia32/ia32entry.S
8196 @@ -13,7 +13,9 @@
8197 #include <asm/thread_info.h>
8198 #include <asm/segment.h>
8199 #include <asm/irqflags.h>
8200 +#include <asm/pgtable.h>
8201 #include <linux/linkage.h>
8202 +#include <asm/alternative-asm.h>
8203
8204 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8205 #include <linux/elf-em.h>
8206 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
8207 ENDPROC(native_irq_enable_sysexit)
8208 #endif
8209
8210 + .macro pax_enter_kernel_user
8211 + pax_set_fptr_mask
8212 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8213 + call pax_enter_kernel_user
8214 +#endif
8215 + .endm
8216 +
8217 + .macro pax_exit_kernel_user
8218 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8219 + call pax_exit_kernel_user
8220 +#endif
8221 +#ifdef CONFIG_PAX_RANDKSTACK
8222 + pushq %rax
8223 + pushq %r11
8224 + call pax_randomize_kstack
8225 + popq %r11
8226 + popq %rax
8227 +#endif
8228 + .endm
8229 +
8230 +.macro pax_erase_kstack
8231 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8232 + call pax_erase_kstack
8233 +#endif
8234 +.endm
8235 +
8236 /*
8237 * 32bit SYSENTER instruction entry.
8238 *
8239 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
8240 CFI_REGISTER rsp,rbp
8241 SWAPGS_UNSAFE_STACK
8242 movq PER_CPU_VAR(kernel_stack), %rsp
8243 - addq $(KERNEL_STACK_OFFSET),%rsp
8244 - /*
8245 - * No need to follow this irqs on/off section: the syscall
8246 - * disabled irqs, here we enable it straight after entry:
8247 - */
8248 - ENABLE_INTERRUPTS(CLBR_NONE)
8249 movl %ebp,%ebp /* zero extension */
8250 pushq_cfi $__USER32_DS
8251 /*CFI_REL_OFFSET ss,0*/
8252 @@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
8253 CFI_REL_OFFSET rsp,0
8254 pushfq_cfi
8255 /*CFI_REL_OFFSET rflags,0*/
8256 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8257 - CFI_REGISTER rip,r10
8258 + orl $X86_EFLAGS_IF,(%rsp)
8259 + GET_THREAD_INFO(%r11)
8260 + movl TI_sysenter_return(%r11), %r11d
8261 + CFI_REGISTER rip,r11
8262 pushq_cfi $__USER32_CS
8263 /*CFI_REL_OFFSET cs,0*/
8264 movl %eax, %eax
8265 - pushq_cfi %r10
8266 + pushq_cfi %r11
8267 CFI_REL_OFFSET rip,0
8268 pushq_cfi %rax
8269 cld
8270 SAVE_ARGS 0,1,0
8271 + pax_enter_kernel_user
8272 + /*
8273 + * No need to follow this irqs on/off section: the syscall
8274 + * disabled irqs, here we enable it straight after entry:
8275 + */
8276 + ENABLE_INTERRUPTS(CLBR_NONE)
8277 /* no need to do an access_ok check here because rbp has been
8278 32bit zero extended */
8279 +
8280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8281 + mov $PAX_USER_SHADOW_BASE,%r11
8282 + add %r11,%rbp
8283 +#endif
8284 +
8285 1: movl (%rbp),%ebp
8286 .section __ex_table,"a"
8287 .quad 1b,ia32_badarg
8288 .previous
8289 - GET_THREAD_INFO(%r10)
8290 - orl $TS_COMPAT,TI_status(%r10)
8291 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8292 + GET_THREAD_INFO(%r11)
8293 + orl $TS_COMPAT,TI_status(%r11)
8294 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8295 CFI_REMEMBER_STATE
8296 jnz sysenter_tracesys
8297 cmpq $(IA32_NR_syscalls-1),%rax
8298 @@ -162,13 +198,15 @@ sysenter_do_call:
8299 sysenter_dispatch:
8300 call *ia32_sys_call_table(,%rax,8)
8301 movq %rax,RAX-ARGOFFSET(%rsp)
8302 - GET_THREAD_INFO(%r10)
8303 + GET_THREAD_INFO(%r11)
8304 DISABLE_INTERRUPTS(CLBR_NONE)
8305 TRACE_IRQS_OFF
8306 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8307 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8308 jnz sysexit_audit
8309 sysexit_from_sys_call:
8310 - andl $~TS_COMPAT,TI_status(%r10)
8311 + pax_exit_kernel_user
8312 + pax_erase_kstack
8313 + andl $~TS_COMPAT,TI_status(%r11)
8314 /* clear IF, that popfq doesn't enable interrupts early */
8315 andl $~0x200,EFLAGS-R11(%rsp)
8316 movl RIP-R11(%rsp),%edx /* User %eip */
8317 @@ -194,6 +232,9 @@ sysexit_from_sys_call:
8318 movl %eax,%esi /* 2nd arg: syscall number */
8319 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8320 call audit_syscall_entry
8321 +
8322 + pax_erase_kstack
8323 +
8324 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8325 cmpq $(IA32_NR_syscalls-1),%rax
8326 ja ia32_badsys
8327 @@ -205,7 +246,7 @@ sysexit_from_sys_call:
8328 .endm
8329
8330 .macro auditsys_exit exit
8331 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8332 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8333 jnz ia32_ret_from_sys_call
8334 TRACE_IRQS_ON
8335 sti
8336 @@ -215,12 +256,12 @@ sysexit_from_sys_call:
8337 movzbl %al,%edi /* zero-extend that into %edi */
8338 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8339 call audit_syscall_exit
8340 - GET_THREAD_INFO(%r10)
8341 + GET_THREAD_INFO(%r11)
8342 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8343 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8344 cli
8345 TRACE_IRQS_OFF
8346 - testl %edi,TI_flags(%r10)
8347 + testl %edi,TI_flags(%r11)
8348 jz \exit
8349 CLEAR_RREGS -ARGOFFSET
8350 jmp int_with_check
8351 @@ -238,7 +279,7 @@ sysexit_audit:
8352
8353 sysenter_tracesys:
8354 #ifdef CONFIG_AUDITSYSCALL
8355 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8356 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8357 jz sysenter_auditsys
8358 #endif
8359 SAVE_REST
8360 @@ -246,6 +287,9 @@ sysenter_tracesys:
8361 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8362 movq %rsp,%rdi /* &pt_regs -> arg1 */
8363 call syscall_trace_enter
8364 +
8365 + pax_erase_kstack
8366 +
8367 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8368 RESTORE_REST
8369 cmpq $(IA32_NR_syscalls-1),%rax
8370 @@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8371 ENTRY(ia32_cstar_target)
8372 CFI_STARTPROC32 simple
8373 CFI_SIGNAL_FRAME
8374 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8375 + CFI_DEF_CFA rsp,0
8376 CFI_REGISTER rip,rcx
8377 /*CFI_REGISTER rflags,r11*/
8378 SWAPGS_UNSAFE_STACK
8379 movl %esp,%r8d
8380 CFI_REGISTER rsp,r8
8381 movq PER_CPU_VAR(kernel_stack),%rsp
8382 + SAVE_ARGS 8*6,0,0
8383 + pax_enter_kernel_user
8384 /*
8385 * No need to follow this irqs on/off section: the syscall
8386 * disabled irqs and here we enable it straight after entry:
8387 */
8388 ENABLE_INTERRUPTS(CLBR_NONE)
8389 - SAVE_ARGS 8,0,0
8390 movl %eax,%eax /* zero extension */
8391 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8392 movq %rcx,RIP-ARGOFFSET(%rsp)
8393 @@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
8394 /* no need to do an access_ok check here because r8 has been
8395 32bit zero extended */
8396 /* hardware stack frame is complete now */
8397 +
8398 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8399 + mov $PAX_USER_SHADOW_BASE,%r11
8400 + add %r11,%r8
8401 +#endif
8402 +
8403 1: movl (%r8),%r9d
8404 .section __ex_table,"a"
8405 .quad 1b,ia32_badarg
8406 .previous
8407 - GET_THREAD_INFO(%r10)
8408 - orl $TS_COMPAT,TI_status(%r10)
8409 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8410 + GET_THREAD_INFO(%r11)
8411 + orl $TS_COMPAT,TI_status(%r11)
8412 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8413 CFI_REMEMBER_STATE
8414 jnz cstar_tracesys
8415 cmpq $IA32_NR_syscalls-1,%rax
8416 @@ -321,13 +372,15 @@ cstar_do_call:
8417 cstar_dispatch:
8418 call *ia32_sys_call_table(,%rax,8)
8419 movq %rax,RAX-ARGOFFSET(%rsp)
8420 - GET_THREAD_INFO(%r10)
8421 + GET_THREAD_INFO(%r11)
8422 DISABLE_INTERRUPTS(CLBR_NONE)
8423 TRACE_IRQS_OFF
8424 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8425 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8426 jnz sysretl_audit
8427 sysretl_from_sys_call:
8428 - andl $~TS_COMPAT,TI_status(%r10)
8429 + pax_exit_kernel_user
8430 + pax_erase_kstack
8431 + andl $~TS_COMPAT,TI_status(%r11)
8432 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8433 movl RIP-ARGOFFSET(%rsp),%ecx
8434 CFI_REGISTER rip,rcx
8435 @@ -355,7 +408,7 @@ sysretl_audit:
8436
8437 cstar_tracesys:
8438 #ifdef CONFIG_AUDITSYSCALL
8439 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8440 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8441 jz cstar_auditsys
8442 #endif
8443 xchgl %r9d,%ebp
8444 @@ -364,6 +417,9 @@ cstar_tracesys:
8445 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8446 movq %rsp,%rdi /* &pt_regs -> arg1 */
8447 call syscall_trace_enter
8448 +
8449 + pax_erase_kstack
8450 +
8451 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8452 RESTORE_REST
8453 xchgl %ebp,%r9d
8454 @@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
8455 CFI_REL_OFFSET rip,RIP-RIP
8456 PARAVIRT_ADJUST_EXCEPTION_FRAME
8457 SWAPGS
8458 - /*
8459 - * No need to follow this irqs on/off section: the syscall
8460 - * disabled irqs and here we enable it straight after entry:
8461 - */
8462 - ENABLE_INTERRUPTS(CLBR_NONE)
8463 movl %eax,%eax
8464 pushq_cfi %rax
8465 cld
8466 /* note the registers are not zero extended to the sf.
8467 this could be a problem. */
8468 SAVE_ARGS 0,1,0
8469 - GET_THREAD_INFO(%r10)
8470 - orl $TS_COMPAT,TI_status(%r10)
8471 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8472 + pax_enter_kernel_user
8473 + /*
8474 + * No need to follow this irqs on/off section: the syscall
8475 + * disabled irqs and here we enable it straight after entry:
8476 + */
8477 + ENABLE_INTERRUPTS(CLBR_NONE)
8478 + GET_THREAD_INFO(%r11)
8479 + orl $TS_COMPAT,TI_status(%r11)
8480 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8481 jnz ia32_tracesys
8482 cmpq $(IA32_NR_syscalls-1),%rax
8483 ja ia32_badsys
8484 @@ -441,6 +498,9 @@ ia32_tracesys:
8485 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8486 movq %rsp,%rdi /* &pt_regs -> arg1 */
8487 call syscall_trace_enter
8488 +
8489 + pax_erase_kstack
8490 +
8491 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8492 RESTORE_REST
8493 cmpq $(IA32_NR_syscalls-1),%rax
8494 @@ -455,6 +515,7 @@ ia32_badsys:
8495
8496 quiet_ni_syscall:
8497 movq $-ENOSYS,%rax
8498 + pax_force_retaddr
8499 ret
8500 CFI_ENDPROC
8501
8502 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8503 index f6f5c53..b358b28 100644
8504 --- a/arch/x86/ia32/sys_ia32.c
8505 +++ b/arch/x86/ia32/sys_ia32.c
8506 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8507 */
8508 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8509 {
8510 - typeof(ubuf->st_uid) uid = 0;
8511 - typeof(ubuf->st_gid) gid = 0;
8512 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8513 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8514 SET_UID(uid, stat->uid);
8515 SET_GID(gid, stat->gid);
8516 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8517 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8518 }
8519 set_fs(KERNEL_DS);
8520 ret = sys_rt_sigprocmask(how,
8521 - set ? (sigset_t __user *)&s : NULL,
8522 - oset ? (sigset_t __user *)&s : NULL,
8523 + set ? (sigset_t __force_user *)&s : NULL,
8524 + oset ? (sigset_t __force_user *)&s : NULL,
8525 sigsetsize);
8526 set_fs(old_fs);
8527 if (ret)
8528 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8529 return alarm_setitimer(seconds);
8530 }
8531
8532 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8533 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8534 int options)
8535 {
8536 return compat_sys_wait4(pid, stat_addr, options, NULL);
8537 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8538 mm_segment_t old_fs = get_fs();
8539
8540 set_fs(KERNEL_DS);
8541 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8542 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8543 set_fs(old_fs);
8544 if (put_compat_timespec(&t, interval))
8545 return -EFAULT;
8546 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8547 mm_segment_t old_fs = get_fs();
8548
8549 set_fs(KERNEL_DS);
8550 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8551 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8552 set_fs(old_fs);
8553 if (!ret) {
8554 switch (_NSIG_WORDS) {
8555 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8556 if (copy_siginfo_from_user32(&info, uinfo))
8557 return -EFAULT;
8558 set_fs(KERNEL_DS);
8559 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8560 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8561 set_fs(old_fs);
8562 return ret;
8563 }
8564 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8565 return -EFAULT;
8566
8567 set_fs(KERNEL_DS);
8568 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8569 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8570 count);
8571 set_fs(old_fs);
8572
8573 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8574 index 091508b..7692c6f 100644
8575 --- a/arch/x86/include/asm/alternative-asm.h
8576 +++ b/arch/x86/include/asm/alternative-asm.h
8577 @@ -4,10 +4,10 @@
8578
8579 #ifdef CONFIG_SMP
8580 .macro LOCK_PREFIX
8581 -1: lock
8582 +672: lock
8583 .section .smp_locks,"a"
8584 .balign 4
8585 - .long 1b - .
8586 + .long 672b - .
8587 .previous
8588 .endm
8589 #else
8590 @@ -15,6 +15,45 @@
8591 .endm
8592 #endif
8593
8594 +#ifdef KERNEXEC_PLUGIN
8595 + .macro pax_force_retaddr_bts rip=0
8596 + btsq $63,\rip(%rsp)
8597 + .endm
8598 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8599 + .macro pax_force_retaddr rip=0, reload=0
8600 + btsq $63,\rip(%rsp)
8601 + .endm
8602 + .macro pax_force_fptr ptr
8603 + btsq $63,\ptr
8604 + .endm
8605 + .macro pax_set_fptr_mask
8606 + .endm
8607 +#endif
8608 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8609 + .macro pax_force_retaddr rip=0, reload=0
8610 + .if \reload
8611 + pax_set_fptr_mask
8612 + .endif
8613 + orq %r10,\rip(%rsp)
8614 + .endm
8615 + .macro pax_force_fptr ptr
8616 + orq %r10,\ptr
8617 + .endm
8618 + .macro pax_set_fptr_mask
8619 + movabs $0x8000000000000000,%r10
8620 + .endm
8621 +#endif
8622 +#else
8623 + .macro pax_force_retaddr rip=0, reload=0
8624 + .endm
8625 + .macro pax_force_fptr ptr
8626 + .endm
8627 + .macro pax_force_retaddr_bts rip=0
8628 + .endm
8629 + .macro pax_set_fptr_mask
8630 + .endm
8631 +#endif
8632 +
8633 .macro altinstruction_entry orig alt feature orig_len alt_len
8634 .long \orig - .
8635 .long \alt - .
8636 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8637 index 37ad100..7d47faa 100644
8638 --- a/arch/x86/include/asm/alternative.h
8639 +++ b/arch/x86/include/asm/alternative.h
8640 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8641 ".section .discard,\"aw\",@progbits\n" \
8642 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8643 ".previous\n" \
8644 - ".section .altinstr_replacement, \"ax\"\n" \
8645 + ".section .altinstr_replacement, \"a\"\n" \
8646 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8647 ".previous"
8648
8649 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8650 index 1a6c09a..fec2432 100644
8651 --- a/arch/x86/include/asm/apic.h
8652 +++ b/arch/x86/include/asm/apic.h
8653 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
8654
8655 #ifdef CONFIG_X86_LOCAL_APIC
8656
8657 -extern unsigned int apic_verbosity;
8658 +extern int apic_verbosity;
8659 extern int local_apic_timer_c2_ok;
8660
8661 extern int disable_apic;
8662 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8663 index 20370c6..a2eb9b0 100644
8664 --- a/arch/x86/include/asm/apm.h
8665 +++ b/arch/x86/include/asm/apm.h
8666 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8667 __asm__ __volatile__(APM_DO_ZERO_SEGS
8668 "pushl %%edi\n\t"
8669 "pushl %%ebp\n\t"
8670 - "lcall *%%cs:apm_bios_entry\n\t"
8671 + "lcall *%%ss:apm_bios_entry\n\t"
8672 "setc %%al\n\t"
8673 "popl %%ebp\n\t"
8674 "popl %%edi\n\t"
8675 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8676 __asm__ __volatile__(APM_DO_ZERO_SEGS
8677 "pushl %%edi\n\t"
8678 "pushl %%ebp\n\t"
8679 - "lcall *%%cs:apm_bios_entry\n\t"
8680 + "lcall *%%ss:apm_bios_entry\n\t"
8681 "setc %%bl\n\t"
8682 "popl %%ebp\n\t"
8683 "popl %%edi\n\t"
8684 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8685 index 58cb6d4..ca9010d 100644
8686 --- a/arch/x86/include/asm/atomic.h
8687 +++ b/arch/x86/include/asm/atomic.h
8688 @@ -22,7 +22,18 @@
8689 */
8690 static inline int atomic_read(const atomic_t *v)
8691 {
8692 - return (*(volatile int *)&(v)->counter);
8693 + return (*(volatile const int *)&(v)->counter);
8694 +}
8695 +
8696 +/**
8697 + * atomic_read_unchecked - read atomic variable
8698 + * @v: pointer of type atomic_unchecked_t
8699 + *
8700 + * Atomically reads the value of @v.
8701 + */
8702 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8703 +{
8704 + return (*(volatile const int *)&(v)->counter);
8705 }
8706
8707 /**
8708 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
8709 }
8710
8711 /**
8712 + * atomic_set_unchecked - set atomic variable
8713 + * @v: pointer of type atomic_unchecked_t
8714 + * @i: required value
8715 + *
8716 + * Atomically sets the value of @v to @i.
8717 + */
8718 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8719 +{
8720 + v->counter = i;
8721 +}
8722 +
8723 +/**
8724 * atomic_add - add integer to atomic variable
8725 * @i: integer value to add
8726 * @v: pointer of type atomic_t
8727 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
8728 */
8729 static inline void atomic_add(int i, atomic_t *v)
8730 {
8731 - asm volatile(LOCK_PREFIX "addl %1,%0"
8732 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8733 +
8734 +#ifdef CONFIG_PAX_REFCOUNT
8735 + "jno 0f\n"
8736 + LOCK_PREFIX "subl %1,%0\n"
8737 + "int $4\n0:\n"
8738 + _ASM_EXTABLE(0b, 0b)
8739 +#endif
8740 +
8741 + : "+m" (v->counter)
8742 + : "ir" (i));
8743 +}
8744 +
8745 +/**
8746 + * atomic_add_unchecked - add integer to atomic variable
8747 + * @i: integer value to add
8748 + * @v: pointer of type atomic_unchecked_t
8749 + *
8750 + * Atomically adds @i to @v.
8751 + */
8752 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8753 +{
8754 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8755 : "+m" (v->counter)
8756 : "ir" (i));
8757 }
8758 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
8759 */
8760 static inline void atomic_sub(int i, atomic_t *v)
8761 {
8762 - asm volatile(LOCK_PREFIX "subl %1,%0"
8763 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8764 +
8765 +#ifdef CONFIG_PAX_REFCOUNT
8766 + "jno 0f\n"
8767 + LOCK_PREFIX "addl %1,%0\n"
8768 + "int $4\n0:\n"
8769 + _ASM_EXTABLE(0b, 0b)
8770 +#endif
8771 +
8772 + : "+m" (v->counter)
8773 + : "ir" (i));
8774 +}
8775 +
8776 +/**
8777 + * atomic_sub_unchecked - subtract integer from atomic variable
8778 + * @i: integer value to subtract
8779 + * @v: pointer of type atomic_unchecked_t
8780 + *
8781 + * Atomically subtracts @i from @v.
8782 + */
8783 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8784 +{
8785 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8786 : "+m" (v->counter)
8787 : "ir" (i));
8788 }
8789 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8790 {
8791 unsigned char c;
8792
8793 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8794 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8795 +
8796 +#ifdef CONFIG_PAX_REFCOUNT
8797 + "jno 0f\n"
8798 + LOCK_PREFIX "addl %2,%0\n"
8799 + "int $4\n0:\n"
8800 + _ASM_EXTABLE(0b, 0b)
8801 +#endif
8802 +
8803 + "sete %1\n"
8804 : "+m" (v->counter), "=qm" (c)
8805 : "ir" (i) : "memory");
8806 return c;
8807 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8808 */
8809 static inline void atomic_inc(atomic_t *v)
8810 {
8811 - asm volatile(LOCK_PREFIX "incl %0"
8812 + asm volatile(LOCK_PREFIX "incl %0\n"
8813 +
8814 +#ifdef CONFIG_PAX_REFCOUNT
8815 + "jno 0f\n"
8816 + LOCK_PREFIX "decl %0\n"
8817 + "int $4\n0:\n"
8818 + _ASM_EXTABLE(0b, 0b)
8819 +#endif
8820 +
8821 + : "+m" (v->counter));
8822 +}
8823 +
8824 +/**
8825 + * atomic_inc_unchecked - increment atomic variable
8826 + * @v: pointer of type atomic_unchecked_t
8827 + *
8828 + * Atomically increments @v by 1.
8829 + */
8830 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8831 +{
8832 + asm volatile(LOCK_PREFIX "incl %0\n"
8833 : "+m" (v->counter));
8834 }
8835
8836 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
8837 */
8838 static inline void atomic_dec(atomic_t *v)
8839 {
8840 - asm volatile(LOCK_PREFIX "decl %0"
8841 + asm volatile(LOCK_PREFIX "decl %0\n"
8842 +
8843 +#ifdef CONFIG_PAX_REFCOUNT
8844 + "jno 0f\n"
8845 + LOCK_PREFIX "incl %0\n"
8846 + "int $4\n0:\n"
8847 + _ASM_EXTABLE(0b, 0b)
8848 +#endif
8849 +
8850 + : "+m" (v->counter));
8851 +}
8852 +
8853 +/**
8854 + * atomic_dec_unchecked - decrement atomic variable
8855 + * @v: pointer of type atomic_unchecked_t
8856 + *
8857 + * Atomically decrements @v by 1.
8858 + */
8859 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8860 +{
8861 + asm volatile(LOCK_PREFIX "decl %0\n"
8862 : "+m" (v->counter));
8863 }
8864
8865 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8866 {
8867 unsigned char c;
8868
8869 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
8870 + asm volatile(LOCK_PREFIX "decl %0\n"
8871 +
8872 +#ifdef CONFIG_PAX_REFCOUNT
8873 + "jno 0f\n"
8874 + LOCK_PREFIX "incl %0\n"
8875 + "int $4\n0:\n"
8876 + _ASM_EXTABLE(0b, 0b)
8877 +#endif
8878 +
8879 + "sete %1\n"
8880 : "+m" (v->counter), "=qm" (c)
8881 : : "memory");
8882 return c != 0;
8883 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8884 {
8885 unsigned char c;
8886
8887 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
8888 + asm volatile(LOCK_PREFIX "incl %0\n"
8889 +
8890 +#ifdef CONFIG_PAX_REFCOUNT
8891 + "jno 0f\n"
8892 + LOCK_PREFIX "decl %0\n"
8893 + "int $4\n0:\n"
8894 + _ASM_EXTABLE(0b, 0b)
8895 +#endif
8896 +
8897 + "sete %1\n"
8898 + : "+m" (v->counter), "=qm" (c)
8899 + : : "memory");
8900 + return c != 0;
8901 +}
8902 +
8903 +/**
8904 + * atomic_inc_and_test_unchecked - increment and test
8905 + * @v: pointer of type atomic_unchecked_t
8906 + *
8907 + * Atomically increments @v by 1
8908 + * and returns true if the result is zero, or false for all
8909 + * other cases.
8910 + */
8911 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8912 +{
8913 + unsigned char c;
8914 +
8915 + asm volatile(LOCK_PREFIX "incl %0\n"
8916 + "sete %1\n"
8917 : "+m" (v->counter), "=qm" (c)
8918 : : "memory");
8919 return c != 0;
8920 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8921 {
8922 unsigned char c;
8923
8924 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8925 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
8926 +
8927 +#ifdef CONFIG_PAX_REFCOUNT
8928 + "jno 0f\n"
8929 + LOCK_PREFIX "subl %2,%0\n"
8930 + "int $4\n0:\n"
8931 + _ASM_EXTABLE(0b, 0b)
8932 +#endif
8933 +
8934 + "sets %1\n"
8935 : "+m" (v->counter), "=qm" (c)
8936 : "ir" (i) : "memory");
8937 return c;
8938 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
8939 goto no_xadd;
8940 #endif
8941 /* Modern 486+ processor */
8942 - return i + xadd(&v->counter, i);
8943 + return i + xadd_check_overflow(&v->counter, i);
8944
8945 #ifdef CONFIG_M386
8946 no_xadd: /* Legacy 386 processor */
8947 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
8948 }
8949
8950 /**
8951 + * atomic_add_return_unchecked - add integer and return
8952 + * @i: integer value to add
8953 + * @v: pointer of type atomic_unchecked_t
8954 + *
8955 + * Atomically adds @i to @v and returns @i + @v
8956 + */
8957 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8958 +{
8959 +#ifdef CONFIG_M386
8960 + int __i;
8961 + unsigned long flags;
8962 + if (unlikely(boot_cpu_data.x86 <= 3))
8963 + goto no_xadd;
8964 +#endif
8965 + /* Modern 486+ processor */
8966 + return i + xadd(&v->counter, i);
8967 +
8968 +#ifdef CONFIG_M386
8969 +no_xadd: /* Legacy 386 processor */
8970 + raw_local_irq_save(flags);
8971 + __i = atomic_read_unchecked(v);
8972 + atomic_set_unchecked(v, i + __i);
8973 + raw_local_irq_restore(flags);
8974 + return i + __i;
8975 +#endif
8976 +}
8977 +
8978 +/**
8979 * atomic_sub_return - subtract integer and return
8980 * @v: pointer of type atomic_t
8981 * @i: integer value to subtract
8982 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
8983 }
8984
8985 #define atomic_inc_return(v) (atomic_add_return(1, v))
8986 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8987 +{
8988 + return atomic_add_return_unchecked(1, v);
8989 +}
8990 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8991
8992 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8993 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8994 return cmpxchg(&v->counter, old, new);
8995 }
8996
8997 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8998 +{
8999 + return cmpxchg(&v->counter, old, new);
9000 +}
9001 +
9002 static inline int atomic_xchg(atomic_t *v, int new)
9003 {
9004 return xchg(&v->counter, new);
9005 }
9006
9007 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9008 +{
9009 + return xchg(&v->counter, new);
9010 +}
9011 +
9012 /**
9013 * __atomic_add_unless - add unless the number is already a given value
9014 * @v: pointer of type atomic_t
9015 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9016 */
9017 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9018 {
9019 - int c, old;
9020 + int c, old, new;
9021 c = atomic_read(v);
9022 for (;;) {
9023 - if (unlikely(c == (u)))
9024 + if (unlikely(c == u))
9025 break;
9026 - old = atomic_cmpxchg((v), c, c + (a));
9027 +
9028 + asm volatile("addl %2,%0\n"
9029 +
9030 +#ifdef CONFIG_PAX_REFCOUNT
9031 + "jno 0f\n"
9032 + "subl %2,%0\n"
9033 + "int $4\n0:\n"
9034 + _ASM_EXTABLE(0b, 0b)
9035 +#endif
9036 +
9037 + : "=r" (new)
9038 + : "0" (c), "ir" (a));
9039 +
9040 + old = atomic_cmpxchg(v, c, new);
9041 if (likely(old == c))
9042 break;
9043 c = old;
9044 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9045 return c;
9046 }
9047
9048 +/**
9049 + * atomic_inc_not_zero_hint - increment if not null
9050 + * @v: pointer of type atomic_t
9051 + * @hint: probable value of the atomic before the increment
9052 + *
9053 + * This version of atomic_inc_not_zero() gives a hint of probable
9054 + * value of the atomic. This helps processor to not read the memory
9055 + * before doing the atomic read/modify/write cycle, lowering
9056 + * number of bus transactions on some arches.
9057 + *
9058 + * Returns: 0 if increment was not done, 1 otherwise.
9059 + */
9060 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9061 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9062 +{
9063 + int val, c = hint, new;
9064 +
9065 + /* sanity test, should be removed by compiler if hint is a constant */
9066 + if (!hint)
9067 + return __atomic_add_unless(v, 1, 0);
9068 +
9069 + do {
9070 + asm volatile("incl %0\n"
9071 +
9072 +#ifdef CONFIG_PAX_REFCOUNT
9073 + "jno 0f\n"
9074 + "decl %0\n"
9075 + "int $4\n0:\n"
9076 + _ASM_EXTABLE(0b, 0b)
9077 +#endif
9078 +
9079 + : "=r" (new)
9080 + : "0" (c));
9081 +
9082 + val = atomic_cmpxchg(v, c, new);
9083 + if (val == c)
9084 + return 1;
9085 + c = val;
9086 + } while (c);
9087 +
9088 + return 0;
9089 +}
9090
9091 /*
9092 * atomic_dec_if_positive - decrement by 1 if old value positive
9093 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9094 index 24098aa..1e37723 100644
9095 --- a/arch/x86/include/asm/atomic64_32.h
9096 +++ b/arch/x86/include/asm/atomic64_32.h
9097 @@ -12,6 +12,14 @@ typedef struct {
9098 u64 __aligned(8) counter;
9099 } atomic64_t;
9100
9101 +#ifdef CONFIG_PAX_REFCOUNT
9102 +typedef struct {
9103 + u64 __aligned(8) counter;
9104 +} atomic64_unchecked_t;
9105 +#else
9106 +typedef atomic64_t atomic64_unchecked_t;
9107 +#endif
9108 +
9109 #define ATOMIC64_INIT(val) { (val) }
9110
9111 #ifdef CONFIG_X86_CMPXCHG64
9112 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9113 }
9114
9115 /**
9116 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9117 + * @p: pointer to type atomic64_unchecked_t
9118 + * @o: expected value
9119 + * @n: new value
9120 + *
9121 + * Atomically sets @v to @n if it was equal to @o and returns
9122 + * the old value.
9123 + */
9124 +
9125 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9126 +{
9127 + return cmpxchg64(&v->counter, o, n);
9128 +}
9129 +
9130 +/**
9131 * atomic64_xchg - xchg atomic64 variable
9132 * @v: pointer to type atomic64_t
9133 * @n: value to assign
9134 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9135 }
9136
9137 /**
9138 + * atomic64_set_unchecked - set atomic64 variable
9139 + * @v: pointer to type atomic64_unchecked_t
9140 + * @n: value to assign
9141 + *
9142 + * Atomically sets the value of @v to @n.
9143 + */
9144 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9145 +{
9146 + unsigned high = (unsigned)(i >> 32);
9147 + unsigned low = (unsigned)i;
9148 + asm volatile(ATOMIC64_ALTERNATIVE(set)
9149 + : "+b" (low), "+c" (high)
9150 + : "S" (v)
9151 + : "eax", "edx", "memory"
9152 + );
9153 +}
9154 +
9155 +/**
9156 * atomic64_read - read atomic64 variable
9157 * @v: pointer to type atomic64_t
9158 *
9159 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
9160 }
9161
9162 /**
9163 + * atomic64_read_unchecked - read atomic64 variable
9164 + * @v: pointer to type atomic64_unchecked_t
9165 + *
9166 + * Atomically reads the value of @v and returns it.
9167 + */
9168 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9169 +{
9170 + long long r;
9171 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9172 + : "=A" (r), "+c" (v)
9173 + : : "memory"
9174 + );
9175 + return r;
9176 + }
9177 +
9178 +/**
9179 * atomic64_add_return - add and return
9180 * @i: integer value to add
9181 * @v: pointer to type atomic64_t
9182 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9183 return i;
9184 }
9185
9186 +/**
9187 + * atomic64_add_return_unchecked - add and return
9188 + * @i: integer value to add
9189 + * @v: pointer to type atomic64_unchecked_t
9190 + *
9191 + * Atomically adds @i to @v and returns @i + *@v
9192 + */
9193 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9194 +{
9195 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9196 + : "+A" (i), "+c" (v)
9197 + : : "memory"
9198 + );
9199 + return i;
9200 +}
9201 +
9202 /*
9203 * Other variants with different arithmetic operators:
9204 */
9205 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9206 return a;
9207 }
9208
9209 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9210 +{
9211 + long long a;
9212 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9213 + : "=A" (a)
9214 + : "S" (v)
9215 + : "memory", "ecx"
9216 + );
9217 + return a;
9218 +}
9219 +
9220 static inline long long atomic64_dec_return(atomic64_t *v)
9221 {
9222 long long a;
9223 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9224 }
9225
9226 /**
9227 + * atomic64_add_unchecked - add integer to atomic64 variable
9228 + * @i: integer value to add
9229 + * @v: pointer to type atomic64_unchecked_t
9230 + *
9231 + * Atomically adds @i to @v.
9232 + */
9233 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9234 +{
9235 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9236 + : "+A" (i), "+c" (v)
9237 + : : "memory"
9238 + );
9239 + return i;
9240 +}
9241 +
9242 +/**
9243 * atomic64_sub - subtract the atomic64 variable
9244 * @i: integer value to subtract
9245 * @v: pointer to type atomic64_t
9246 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9247 index 0e1cbfc..5623683 100644
9248 --- a/arch/x86/include/asm/atomic64_64.h
9249 +++ b/arch/x86/include/asm/atomic64_64.h
9250 @@ -18,7 +18,19 @@
9251 */
9252 static inline long atomic64_read(const atomic64_t *v)
9253 {
9254 - return (*(volatile long *)&(v)->counter);
9255 + return (*(volatile const long *)&(v)->counter);
9256 +}
9257 +
9258 +/**
9259 + * atomic64_read_unchecked - read atomic64 variable
9260 + * @v: pointer of type atomic64_unchecked_t
9261 + *
9262 + * Atomically reads the value of @v.
9263 + * Doesn't imply a read memory barrier.
9264 + */
9265 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9266 +{
9267 + return (*(volatile const long *)&(v)->counter);
9268 }
9269
9270 /**
9271 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9272 }
9273
9274 /**
9275 + * atomic64_set_unchecked - set atomic64 variable
9276 + * @v: pointer to type atomic64_unchecked_t
9277 + * @i: required value
9278 + *
9279 + * Atomically sets the value of @v to @i.
9280 + */
9281 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9282 +{
9283 + v->counter = i;
9284 +}
9285 +
9286 +/**
9287 * atomic64_add - add integer to atomic64 variable
9288 * @i: integer value to add
9289 * @v: pointer to type atomic64_t
9290 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9291 */
9292 static inline void atomic64_add(long i, atomic64_t *v)
9293 {
9294 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9295 +
9296 +#ifdef CONFIG_PAX_REFCOUNT
9297 + "jno 0f\n"
9298 + LOCK_PREFIX "subq %1,%0\n"
9299 + "int $4\n0:\n"
9300 + _ASM_EXTABLE(0b, 0b)
9301 +#endif
9302 +
9303 + : "=m" (v->counter)
9304 + : "er" (i), "m" (v->counter));
9305 +}
9306 +
9307 +/**
9308 + * atomic64_add_unchecked - add integer to atomic64 variable
9309 + * @i: integer value to add
9310 + * @v: pointer to type atomic64_unchecked_t
9311 + *
9312 + * Atomically adds @i to @v.
9313 + */
9314 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9315 +{
9316 asm volatile(LOCK_PREFIX "addq %1,%0"
9317 : "=m" (v->counter)
9318 : "er" (i), "m" (v->counter));
9319 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9320 */
9321 static inline void atomic64_sub(long i, atomic64_t *v)
9322 {
9323 - asm volatile(LOCK_PREFIX "subq %1,%0"
9324 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9325 +
9326 +#ifdef CONFIG_PAX_REFCOUNT
9327 + "jno 0f\n"
9328 + LOCK_PREFIX "addq %1,%0\n"
9329 + "int $4\n0:\n"
9330 + _ASM_EXTABLE(0b, 0b)
9331 +#endif
9332 +
9333 + : "=m" (v->counter)
9334 + : "er" (i), "m" (v->counter));
9335 +}
9336 +
9337 +/**
9338 + * atomic64_sub_unchecked - subtract the atomic64 variable
9339 + * @i: integer value to subtract
9340 + * @v: pointer to type atomic64_unchecked_t
9341 + *
9342 + * Atomically subtracts @i from @v.
9343 + */
9344 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9345 +{
9346 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9347 : "=m" (v->counter)
9348 : "er" (i), "m" (v->counter));
9349 }
9350 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9351 {
9352 unsigned char c;
9353
9354 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9355 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9356 +
9357 +#ifdef CONFIG_PAX_REFCOUNT
9358 + "jno 0f\n"
9359 + LOCK_PREFIX "addq %2,%0\n"
9360 + "int $4\n0:\n"
9361 + _ASM_EXTABLE(0b, 0b)
9362 +#endif
9363 +
9364 + "sete %1\n"
9365 : "=m" (v->counter), "=qm" (c)
9366 : "er" (i), "m" (v->counter) : "memory");
9367 return c;
9368 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9369 */
9370 static inline void atomic64_inc(atomic64_t *v)
9371 {
9372 + asm volatile(LOCK_PREFIX "incq %0\n"
9373 +
9374 +#ifdef CONFIG_PAX_REFCOUNT
9375 + "jno 0f\n"
9376 + LOCK_PREFIX "decq %0\n"
9377 + "int $4\n0:\n"
9378 + _ASM_EXTABLE(0b, 0b)
9379 +#endif
9380 +
9381 + : "=m" (v->counter)
9382 + : "m" (v->counter));
9383 +}
9384 +
9385 +/**
9386 + * atomic64_inc_unchecked - increment atomic64 variable
9387 + * @v: pointer to type atomic64_unchecked_t
9388 + *
9389 + * Atomically increments @v by 1.
9390 + */
9391 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9392 +{
9393 asm volatile(LOCK_PREFIX "incq %0"
9394 : "=m" (v->counter)
9395 : "m" (v->counter));
9396 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9397 */
9398 static inline void atomic64_dec(atomic64_t *v)
9399 {
9400 - asm volatile(LOCK_PREFIX "decq %0"
9401 + asm volatile(LOCK_PREFIX "decq %0\n"
9402 +
9403 +#ifdef CONFIG_PAX_REFCOUNT
9404 + "jno 0f\n"
9405 + LOCK_PREFIX "incq %0\n"
9406 + "int $4\n0:\n"
9407 + _ASM_EXTABLE(0b, 0b)
9408 +#endif
9409 +
9410 + : "=m" (v->counter)
9411 + : "m" (v->counter));
9412 +}
9413 +
9414 +/**
9415 + * atomic64_dec_unchecked - decrement atomic64 variable
9416 + * @v: pointer to type atomic64_t
9417 + *
9418 + * Atomically decrements @v by 1.
9419 + */
9420 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9421 +{
9422 + asm volatile(LOCK_PREFIX "decq %0\n"
9423 : "=m" (v->counter)
9424 : "m" (v->counter));
9425 }
9426 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9427 {
9428 unsigned char c;
9429
9430 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9431 + asm volatile(LOCK_PREFIX "decq %0\n"
9432 +
9433 +#ifdef CONFIG_PAX_REFCOUNT
9434 + "jno 0f\n"
9435 + LOCK_PREFIX "incq %0\n"
9436 + "int $4\n0:\n"
9437 + _ASM_EXTABLE(0b, 0b)
9438 +#endif
9439 +
9440 + "sete %1\n"
9441 : "=m" (v->counter), "=qm" (c)
9442 : "m" (v->counter) : "memory");
9443 return c != 0;
9444 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9445 {
9446 unsigned char c;
9447
9448 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9449 + asm volatile(LOCK_PREFIX "incq %0\n"
9450 +
9451 +#ifdef CONFIG_PAX_REFCOUNT
9452 + "jno 0f\n"
9453 + LOCK_PREFIX "decq %0\n"
9454 + "int $4\n0:\n"
9455 + _ASM_EXTABLE(0b, 0b)
9456 +#endif
9457 +
9458 + "sete %1\n"
9459 : "=m" (v->counter), "=qm" (c)
9460 : "m" (v->counter) : "memory");
9461 return c != 0;
9462 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9463 {
9464 unsigned char c;
9465
9466 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9467 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9468 +
9469 +#ifdef CONFIG_PAX_REFCOUNT
9470 + "jno 0f\n"
9471 + LOCK_PREFIX "subq %2,%0\n"
9472 + "int $4\n0:\n"
9473 + _ASM_EXTABLE(0b, 0b)
9474 +#endif
9475 +
9476 + "sets %1\n"
9477 : "=m" (v->counter), "=qm" (c)
9478 : "er" (i), "m" (v->counter) : "memory");
9479 return c;
9480 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9481 */
9482 static inline long atomic64_add_return(long i, atomic64_t *v)
9483 {
9484 + return i + xadd_check_overflow(&v->counter, i);
9485 +}
9486 +
9487 +/**
9488 + * atomic64_add_return_unchecked - add and return
9489 + * @i: integer value to add
9490 + * @v: pointer to type atomic64_unchecked_t
9491 + *
9492 + * Atomically adds @i to @v and returns @i + @v
9493 + */
9494 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9495 +{
9496 return i + xadd(&v->counter, i);
9497 }
9498
9499 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9500 }
9501
9502 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9503 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9504 +{
9505 + return atomic64_add_return_unchecked(1, v);
9506 +}
9507 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9508
9509 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9510 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9511 return cmpxchg(&v->counter, old, new);
9512 }
9513
9514 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9515 +{
9516 + return cmpxchg(&v->counter, old, new);
9517 +}
9518 +
9519 static inline long atomic64_xchg(atomic64_t *v, long new)
9520 {
9521 return xchg(&v->counter, new);
9522 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9523 */
9524 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9525 {
9526 - long c, old;
9527 + long c, old, new;
9528 c = atomic64_read(v);
9529 for (;;) {
9530 - if (unlikely(c == (u)))
9531 + if (unlikely(c == u))
9532 break;
9533 - old = atomic64_cmpxchg((v), c, c + (a));
9534 +
9535 + asm volatile("add %2,%0\n"
9536 +
9537 +#ifdef CONFIG_PAX_REFCOUNT
9538 + "jno 0f\n"
9539 + "sub %2,%0\n"
9540 + "int $4\n0:\n"
9541 + _ASM_EXTABLE(0b, 0b)
9542 +#endif
9543 +
9544 + : "=r" (new)
9545 + : "0" (c), "ir" (a));
9546 +
9547 + old = atomic64_cmpxchg(v, c, new);
9548 if (likely(old == c))
9549 break;
9550 c = old;
9551 }
9552 - return c != (u);
9553 + return c != u;
9554 }
9555
9556 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9557 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9558 index 1775d6e..b65017f 100644
9559 --- a/arch/x86/include/asm/bitops.h
9560 +++ b/arch/x86/include/asm/bitops.h
9561 @@ -38,7 +38,7 @@
9562 * a mask operation on a byte.
9563 */
9564 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9565 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9566 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9567 #define CONST_MASK(nr) (1 << ((nr) & 7))
9568
9569 /**
9570 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9571 index 5e1a2ee..c9f9533 100644
9572 --- a/arch/x86/include/asm/boot.h
9573 +++ b/arch/x86/include/asm/boot.h
9574 @@ -11,10 +11,15 @@
9575 #include <asm/pgtable_types.h>
9576
9577 /* Physical address where kernel should be loaded. */
9578 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9579 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9580 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9581 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9582
9583 +#ifndef __ASSEMBLY__
9584 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9585 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9586 +#endif
9587 +
9588 /* Minimum kernel alignment, as a power of two */
9589 #ifdef CONFIG_X86_64
9590 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9591 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9592 index 48f99f1..d78ebf9 100644
9593 --- a/arch/x86/include/asm/cache.h
9594 +++ b/arch/x86/include/asm/cache.h
9595 @@ -5,12 +5,13 @@
9596
9597 /* L1 cache line size */
9598 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9599 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9600 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9601
9602 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9603 +#define __read_only __attribute__((__section__(".data..read_only")))
9604
9605 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9606 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9607 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9608
9609 #ifdef CONFIG_X86_VSMP
9610 #ifdef CONFIG_SMP
9611 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9612 index 4e12668..501d239 100644
9613 --- a/arch/x86/include/asm/cacheflush.h
9614 +++ b/arch/x86/include/asm/cacheflush.h
9615 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9616 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9617
9618 if (pg_flags == _PGMT_DEFAULT)
9619 - return -1;
9620 + return ~0UL;
9621 else if (pg_flags == _PGMT_WC)
9622 return _PAGE_CACHE_WC;
9623 else if (pg_flags == _PGMT_UC_MINUS)
9624 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9625 index 46fc474..b02b0f9 100644
9626 --- a/arch/x86/include/asm/checksum_32.h
9627 +++ b/arch/x86/include/asm/checksum_32.h
9628 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9629 int len, __wsum sum,
9630 int *src_err_ptr, int *dst_err_ptr);
9631
9632 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9633 + int len, __wsum sum,
9634 + int *src_err_ptr, int *dst_err_ptr);
9635 +
9636 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9637 + int len, __wsum sum,
9638 + int *src_err_ptr, int *dst_err_ptr);
9639 +
9640 /*
9641 * Note: when you get a NULL pointer exception here this means someone
9642 * passed in an incorrect kernel address to one of these functions.
9643 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9644 int *err_ptr)
9645 {
9646 might_sleep();
9647 - return csum_partial_copy_generic((__force void *)src, dst,
9648 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9649 len, sum, err_ptr, NULL);
9650 }
9651
9652 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9653 {
9654 might_sleep();
9655 if (access_ok(VERIFY_WRITE, dst, len))
9656 - return csum_partial_copy_generic(src, (__force void *)dst,
9657 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9658 len, sum, NULL, err_ptr);
9659
9660 if (len)
9661 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
9662 index 5d3acdf..6447a02 100644
9663 --- a/arch/x86/include/asm/cmpxchg.h
9664 +++ b/arch/x86/include/asm/cmpxchg.h
9665 @@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
9666 __compiletime_error("Bad argument size for cmpxchg");
9667 extern void __xadd_wrong_size(void)
9668 __compiletime_error("Bad argument size for xadd");
9669 +extern void __xadd_check_overflow_wrong_size(void)
9670 + __compiletime_error("Bad argument size for xadd_check_overflow");
9671
9672 /*
9673 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
9674 @@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
9675 __ret; \
9676 })
9677
9678 +#define __xadd_check_overflow(ptr, inc, lock) \
9679 + ({ \
9680 + __typeof__ (*(ptr)) __ret = (inc); \
9681 + switch (sizeof(*(ptr))) { \
9682 + case __X86_CASE_L: \
9683 + asm volatile (lock "xaddl %0, %1\n" \
9684 + "jno 0f\n" \
9685 + "mov %0,%1\n" \
9686 + "int $4\n0:\n" \
9687 + _ASM_EXTABLE(0b, 0b) \
9688 + : "+r" (__ret), "+m" (*(ptr)) \
9689 + : : "memory", "cc"); \
9690 + break; \
9691 + case __X86_CASE_Q: \
9692 + asm volatile (lock "xaddq %q0, %1\n" \
9693 + "jno 0f\n" \
9694 + "mov %0,%1\n" \
9695 + "int $4\n0:\n" \
9696 + _ASM_EXTABLE(0b, 0b) \
9697 + : "+r" (__ret), "+m" (*(ptr)) \
9698 + : : "memory", "cc"); \
9699 + break; \
9700 + default: \
9701 + __xadd_check_overflow_wrong_size(); \
9702 + } \
9703 + __ret; \
9704 + })
9705 +
9706 /*
9707 * xadd() adds "inc" to "*ptr" and atomically returns the previous
9708 * value of "*ptr".
9709 @@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
9710 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
9711 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
9712
9713 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
9714 +
9715 #endif /* ASM_X86_CMPXCHG_H */
9716 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
9717 index f3444f7..051a196 100644
9718 --- a/arch/x86/include/asm/cpufeature.h
9719 +++ b/arch/x86/include/asm/cpufeature.h
9720 @@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
9721 ".section .discard,\"aw\",@progbits\n"
9722 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
9723 ".previous\n"
9724 - ".section .altinstr_replacement,\"ax\"\n"
9725 + ".section .altinstr_replacement,\"a\"\n"
9726 "3: movb $1,%0\n"
9727 "4:\n"
9728 ".previous\n"
9729 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9730 index 41935fa..3b40db8 100644
9731 --- a/arch/x86/include/asm/desc.h
9732 +++ b/arch/x86/include/asm/desc.h
9733 @@ -4,6 +4,7 @@
9734 #include <asm/desc_defs.h>
9735 #include <asm/ldt.h>
9736 #include <asm/mmu.h>
9737 +#include <asm/pgtable.h>
9738
9739 #include <linux/smp.h>
9740
9741 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9742
9743 desc->type = (info->read_exec_only ^ 1) << 1;
9744 desc->type |= info->contents << 2;
9745 + desc->type |= info->seg_not_present ^ 1;
9746
9747 desc->s = 1;
9748 desc->dpl = 0x3;
9749 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9750 }
9751
9752 extern struct desc_ptr idt_descr;
9753 -extern gate_desc idt_table[];
9754 -
9755 -struct gdt_page {
9756 - struct desc_struct gdt[GDT_ENTRIES];
9757 -} __attribute__((aligned(PAGE_SIZE)));
9758 -
9759 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9760 +extern gate_desc idt_table[256];
9761
9762 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9763 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9764 {
9765 - return per_cpu(gdt_page, cpu).gdt;
9766 + return cpu_gdt_table[cpu];
9767 }
9768
9769 #ifdef CONFIG_X86_64
9770 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9771 unsigned long base, unsigned dpl, unsigned flags,
9772 unsigned short seg)
9773 {
9774 - gate->a = (seg << 16) | (base & 0xffff);
9775 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9776 + gate->gate.offset_low = base;
9777 + gate->gate.seg = seg;
9778 + gate->gate.reserved = 0;
9779 + gate->gate.type = type;
9780 + gate->gate.s = 0;
9781 + gate->gate.dpl = dpl;
9782 + gate->gate.p = 1;
9783 + gate->gate.offset_high = base >> 16;
9784 }
9785
9786 #endif
9787 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9788
9789 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
9790 {
9791 + pax_open_kernel();
9792 memcpy(&idt[entry], gate, sizeof(*gate));
9793 + pax_close_kernel();
9794 }
9795
9796 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
9797 {
9798 + pax_open_kernel();
9799 memcpy(&ldt[entry], desc, 8);
9800 + pax_close_kernel();
9801 }
9802
9803 static inline void
9804 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
9805 default: size = sizeof(*gdt); break;
9806 }
9807
9808 + pax_open_kernel();
9809 memcpy(&gdt[entry], desc, size);
9810 + pax_close_kernel();
9811 }
9812
9813 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9814 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9815
9816 static inline void native_load_tr_desc(void)
9817 {
9818 + pax_open_kernel();
9819 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9820 + pax_close_kernel();
9821 }
9822
9823 static inline void native_load_gdt(const struct desc_ptr *dtr)
9824 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9825 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9826 unsigned int i;
9827
9828 + pax_open_kernel();
9829 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9830 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9831 + pax_close_kernel();
9832 }
9833
9834 #define _LDT_empty(info) \
9835 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9836 desc->limit = (limit >> 16) & 0xf;
9837 }
9838
9839 -static inline void _set_gate(int gate, unsigned type, void *addr,
9840 +static inline void _set_gate(int gate, unsigned type, const void *addr,
9841 unsigned dpl, unsigned ist, unsigned seg)
9842 {
9843 gate_desc s;
9844 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9845 * Pentium F0 0F bugfix can have resulted in the mapped
9846 * IDT being write-protected.
9847 */
9848 -static inline void set_intr_gate(unsigned int n, void *addr)
9849 +static inline void set_intr_gate(unsigned int n, const void *addr)
9850 {
9851 BUG_ON((unsigned)n > 0xFF);
9852 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9853 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9854 /*
9855 * This routine sets up an interrupt gate at directory privilege level 3.
9856 */
9857 -static inline void set_system_intr_gate(unsigned int n, void *addr)
9858 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
9859 {
9860 BUG_ON((unsigned)n > 0xFF);
9861 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9862 }
9863
9864 -static inline void set_system_trap_gate(unsigned int n, void *addr)
9865 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
9866 {
9867 BUG_ON((unsigned)n > 0xFF);
9868 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9869 }
9870
9871 -static inline void set_trap_gate(unsigned int n, void *addr)
9872 +static inline void set_trap_gate(unsigned int n, const void *addr)
9873 {
9874 BUG_ON((unsigned)n > 0xFF);
9875 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9876 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9877 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9878 {
9879 BUG_ON((unsigned)n > 0xFF);
9880 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9881 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9882 }
9883
9884 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9885 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9886 {
9887 BUG_ON((unsigned)n > 0xFF);
9888 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9889 }
9890
9891 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9892 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9893 {
9894 BUG_ON((unsigned)n > 0xFF);
9895 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9896 }
9897
9898 +#ifdef CONFIG_X86_32
9899 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9900 +{
9901 + struct desc_struct d;
9902 +
9903 + if (likely(limit))
9904 + limit = (limit - 1UL) >> PAGE_SHIFT;
9905 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
9906 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9907 +}
9908 +#endif
9909 +
9910 #endif /* _ASM_X86_DESC_H */
9911 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9912 index 278441f..b95a174 100644
9913 --- a/arch/x86/include/asm/desc_defs.h
9914 +++ b/arch/x86/include/asm/desc_defs.h
9915 @@ -31,6 +31,12 @@ struct desc_struct {
9916 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9917 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9918 };
9919 + struct {
9920 + u16 offset_low;
9921 + u16 seg;
9922 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9923 + unsigned offset_high: 16;
9924 + } gate;
9925 };
9926 } __attribute__((packed));
9927
9928 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9929 index 908b969..a1f4eb4 100644
9930 --- a/arch/x86/include/asm/e820.h
9931 +++ b/arch/x86/include/asm/e820.h
9932 @@ -69,7 +69,7 @@ struct e820map {
9933 #define ISA_START_ADDRESS 0xa0000
9934 #define ISA_END_ADDRESS 0x100000
9935
9936 -#define BIOS_BEGIN 0x000a0000
9937 +#define BIOS_BEGIN 0x000c0000
9938 #define BIOS_END 0x00100000
9939
9940 #define BIOS_ROM_BASE 0xffe00000
9941 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9942 index 5f962df..7289f09 100644
9943 --- a/arch/x86/include/asm/elf.h
9944 +++ b/arch/x86/include/asm/elf.h
9945 @@ -238,7 +238,25 @@ extern int force_personality32;
9946 the loader. We need to make sure that it is out of the way of the program
9947 that it will "exec", and that there is sufficient room for the brk. */
9948
9949 +#ifdef CONFIG_PAX_SEGMEXEC
9950 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9951 +#else
9952 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9953 +#endif
9954 +
9955 +#ifdef CONFIG_PAX_ASLR
9956 +#ifdef CONFIG_X86_32
9957 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9958 +
9959 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9960 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9961 +#else
9962 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
9963 +
9964 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9965 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9966 +#endif
9967 +#endif
9968
9969 /* This yields a mask that user programs can use to figure out what
9970 instruction set this CPU supports. This could be done in user space,
9971 @@ -291,9 +309,7 @@ do { \
9972
9973 #define ARCH_DLINFO \
9974 do { \
9975 - if (vdso_enabled) \
9976 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9977 - (unsigned long)current->mm->context.vdso); \
9978 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9979 } while (0)
9980
9981 #define AT_SYSINFO 32
9982 @@ -304,7 +320,7 @@ do { \
9983
9984 #endif /* !CONFIG_X86_32 */
9985
9986 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9987 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9988
9989 #define VDSO_ENTRY \
9990 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
9991 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
9992 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
9993 #define compat_arch_setup_additional_pages syscall32_setup_pages
9994
9995 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9996 -#define arch_randomize_brk arch_randomize_brk
9997 -
9998 /*
9999 * True on X86_32 or when emulating IA32 on X86_64
10000 */
10001 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10002 index cc70c1c..d96d011 100644
10003 --- a/arch/x86/include/asm/emergency-restart.h
10004 +++ b/arch/x86/include/asm/emergency-restart.h
10005 @@ -15,6 +15,6 @@ enum reboot_type {
10006
10007 extern enum reboot_type reboot_type;
10008
10009 -extern void machine_emergency_restart(void);
10010 +extern void machine_emergency_restart(void) __noreturn;
10011
10012 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10013 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10014 index d09bb03..4ea4194 100644
10015 --- a/arch/x86/include/asm/futex.h
10016 +++ b/arch/x86/include/asm/futex.h
10017 @@ -12,16 +12,18 @@
10018 #include <asm/system.h>
10019
10020 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10021 + typecheck(u32 __user *, uaddr); \
10022 asm volatile("1:\t" insn "\n" \
10023 "2:\t.section .fixup,\"ax\"\n" \
10024 "3:\tmov\t%3, %1\n" \
10025 "\tjmp\t2b\n" \
10026 "\t.previous\n" \
10027 _ASM_EXTABLE(1b, 3b) \
10028 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10029 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10030 : "i" (-EFAULT), "0" (oparg), "1" (0))
10031
10032 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10033 + typecheck(u32 __user *, uaddr); \
10034 asm volatile("1:\tmovl %2, %0\n" \
10035 "\tmovl\t%0, %3\n" \
10036 "\t" insn "\n" \
10037 @@ -34,7 +36,7 @@
10038 _ASM_EXTABLE(1b, 4b) \
10039 _ASM_EXTABLE(2b, 4b) \
10040 : "=&a" (oldval), "=&r" (ret), \
10041 - "+m" (*uaddr), "=&r" (tem) \
10042 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10043 : "r" (oparg), "i" (-EFAULT), "1" (0))
10044
10045 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10046 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10047
10048 switch (op) {
10049 case FUTEX_OP_SET:
10050 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10051 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10052 break;
10053 case FUTEX_OP_ADD:
10054 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10055 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10056 uaddr, oparg);
10057 break;
10058 case FUTEX_OP_OR:
10059 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10060 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10061 return -EFAULT;
10062
10063 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10064 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10065 "2:\t.section .fixup, \"ax\"\n"
10066 "3:\tmov %3, %0\n"
10067 "\tjmp 2b\n"
10068 "\t.previous\n"
10069 _ASM_EXTABLE(1b, 3b)
10070 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10071 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10072 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10073 : "memory"
10074 );
10075 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10076 index eb92a6e..b98b2f4 100644
10077 --- a/arch/x86/include/asm/hw_irq.h
10078 +++ b/arch/x86/include/asm/hw_irq.h
10079 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10080 extern void enable_IO_APIC(void);
10081
10082 /* Statistics */
10083 -extern atomic_t irq_err_count;
10084 -extern atomic_t irq_mis_count;
10085 +extern atomic_unchecked_t irq_err_count;
10086 +extern atomic_unchecked_t irq_mis_count;
10087
10088 /* EISA */
10089 extern void eisa_set_level_irq(unsigned int irq);
10090 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10091 index a850b4d..bae26dc 100644
10092 --- a/arch/x86/include/asm/i387.h
10093 +++ b/arch/x86/include/asm/i387.h
10094 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10095 {
10096 int err;
10097
10098 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10099 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10100 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10101 +#endif
10102 +
10103 /* See comment in fxsave() below. */
10104 #ifdef CONFIG_AS_FXSAVEQ
10105 asm volatile("1: fxrstorq %[fx]\n\t"
10106 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10107 {
10108 int err;
10109
10110 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10111 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10112 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10113 +#endif
10114 +
10115 /*
10116 * Clear the bytes not touched by the fxsave and reserved
10117 * for the SW usage.
10118 @@ -424,7 +434,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10119 static inline bool interrupted_user_mode(void)
10120 {
10121 struct pt_regs *regs = get_irq_regs();
10122 - return regs && user_mode_vm(regs);
10123 + return regs && user_mode(regs);
10124 }
10125
10126 /*
10127 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10128 index d8e8eef..99f81ae 100644
10129 --- a/arch/x86/include/asm/io.h
10130 +++ b/arch/x86/include/asm/io.h
10131 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10132
10133 #include <linux/vmalloc.h>
10134
10135 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10136 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10137 +{
10138 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10139 +}
10140 +
10141 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10142 +{
10143 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10144 +}
10145 +
10146 /*
10147 * Convert a virtual cached pointer to an uncached pointer
10148 */
10149 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10150 index bba3cf8..06bc8da 100644
10151 --- a/arch/x86/include/asm/irqflags.h
10152 +++ b/arch/x86/include/asm/irqflags.h
10153 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10154 sti; \
10155 sysexit
10156
10157 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10158 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10159 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10160 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10161 +
10162 #else
10163 #define INTERRUPT_RETURN iret
10164 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10165 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10166 index 5478825..839e88c 100644
10167 --- a/arch/x86/include/asm/kprobes.h
10168 +++ b/arch/x86/include/asm/kprobes.h
10169 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10170 #define RELATIVEJUMP_SIZE 5
10171 #define RELATIVECALL_OPCODE 0xe8
10172 #define RELATIVE_ADDR_SIZE 4
10173 -#define MAX_STACK_SIZE 64
10174 -#define MIN_STACK_SIZE(ADDR) \
10175 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10176 - THREAD_SIZE - (unsigned long)(ADDR))) \
10177 - ? (MAX_STACK_SIZE) \
10178 - : (((unsigned long)current_thread_info()) + \
10179 - THREAD_SIZE - (unsigned long)(ADDR)))
10180 +#define MAX_STACK_SIZE 64UL
10181 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10182
10183 #define flush_insn_slot(p) do { } while (0)
10184
10185 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10186 index b4973f4..7c4d3fc 100644
10187 --- a/arch/x86/include/asm/kvm_host.h
10188 +++ b/arch/x86/include/asm/kvm_host.h
10189 @@ -459,7 +459,7 @@ struct kvm_arch {
10190 unsigned int n_requested_mmu_pages;
10191 unsigned int n_max_mmu_pages;
10192 unsigned int indirect_shadow_pages;
10193 - atomic_t invlpg_counter;
10194 + atomic_unchecked_t invlpg_counter;
10195 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
10196 /*
10197 * Hash table of struct kvm_mmu_page.
10198 @@ -638,7 +638,7 @@ struct kvm_x86_ops {
10199 int (*check_intercept)(struct kvm_vcpu *vcpu,
10200 struct x86_instruction_info *info,
10201 enum x86_intercept_stage stage);
10202 -};
10203 +} __do_const;
10204
10205 struct kvm_arch_async_pf {
10206 u32 token;
10207 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10208 index 9cdae5d..300d20f 100644
10209 --- a/arch/x86/include/asm/local.h
10210 +++ b/arch/x86/include/asm/local.h
10211 @@ -18,26 +18,58 @@ typedef struct {
10212
10213 static inline void local_inc(local_t *l)
10214 {
10215 - asm volatile(_ASM_INC "%0"
10216 + asm volatile(_ASM_INC "%0\n"
10217 +
10218 +#ifdef CONFIG_PAX_REFCOUNT
10219 + "jno 0f\n"
10220 + _ASM_DEC "%0\n"
10221 + "int $4\n0:\n"
10222 + _ASM_EXTABLE(0b, 0b)
10223 +#endif
10224 +
10225 : "+m" (l->a.counter));
10226 }
10227
10228 static inline void local_dec(local_t *l)
10229 {
10230 - asm volatile(_ASM_DEC "%0"
10231 + asm volatile(_ASM_DEC "%0\n"
10232 +
10233 +#ifdef CONFIG_PAX_REFCOUNT
10234 + "jno 0f\n"
10235 + _ASM_INC "%0\n"
10236 + "int $4\n0:\n"
10237 + _ASM_EXTABLE(0b, 0b)
10238 +#endif
10239 +
10240 : "+m" (l->a.counter));
10241 }
10242
10243 static inline void local_add(long i, local_t *l)
10244 {
10245 - asm volatile(_ASM_ADD "%1,%0"
10246 + asm volatile(_ASM_ADD "%1,%0\n"
10247 +
10248 +#ifdef CONFIG_PAX_REFCOUNT
10249 + "jno 0f\n"
10250 + _ASM_SUB "%1,%0\n"
10251 + "int $4\n0:\n"
10252 + _ASM_EXTABLE(0b, 0b)
10253 +#endif
10254 +
10255 : "+m" (l->a.counter)
10256 : "ir" (i));
10257 }
10258
10259 static inline void local_sub(long i, local_t *l)
10260 {
10261 - asm volatile(_ASM_SUB "%1,%0"
10262 + asm volatile(_ASM_SUB "%1,%0\n"
10263 +
10264 +#ifdef CONFIG_PAX_REFCOUNT
10265 + "jno 0f\n"
10266 + _ASM_ADD "%1,%0\n"
10267 + "int $4\n0:\n"
10268 + _ASM_EXTABLE(0b, 0b)
10269 +#endif
10270 +
10271 : "+m" (l->a.counter)
10272 : "ir" (i));
10273 }
10274 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10275 {
10276 unsigned char c;
10277
10278 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10279 + asm volatile(_ASM_SUB "%2,%0\n"
10280 +
10281 +#ifdef CONFIG_PAX_REFCOUNT
10282 + "jno 0f\n"
10283 + _ASM_ADD "%2,%0\n"
10284 + "int $4\n0:\n"
10285 + _ASM_EXTABLE(0b, 0b)
10286 +#endif
10287 +
10288 + "sete %1\n"
10289 : "+m" (l->a.counter), "=qm" (c)
10290 : "ir" (i) : "memory");
10291 return c;
10292 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10293 {
10294 unsigned char c;
10295
10296 - asm volatile(_ASM_DEC "%0; sete %1"
10297 + asm volatile(_ASM_DEC "%0\n"
10298 +
10299 +#ifdef CONFIG_PAX_REFCOUNT
10300 + "jno 0f\n"
10301 + _ASM_INC "%0\n"
10302 + "int $4\n0:\n"
10303 + _ASM_EXTABLE(0b, 0b)
10304 +#endif
10305 +
10306 + "sete %1\n"
10307 : "+m" (l->a.counter), "=qm" (c)
10308 : : "memory");
10309 return c != 0;
10310 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10311 {
10312 unsigned char c;
10313
10314 - asm volatile(_ASM_INC "%0; sete %1"
10315 + asm volatile(_ASM_INC "%0\n"
10316 +
10317 +#ifdef CONFIG_PAX_REFCOUNT
10318 + "jno 0f\n"
10319 + _ASM_DEC "%0\n"
10320 + "int $4\n0:\n"
10321 + _ASM_EXTABLE(0b, 0b)
10322 +#endif
10323 +
10324 + "sete %1\n"
10325 : "+m" (l->a.counter), "=qm" (c)
10326 : : "memory");
10327 return c != 0;
10328 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10329 {
10330 unsigned char c;
10331
10332 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10333 + asm volatile(_ASM_ADD "%2,%0\n"
10334 +
10335 +#ifdef CONFIG_PAX_REFCOUNT
10336 + "jno 0f\n"
10337 + _ASM_SUB "%2,%0\n"
10338 + "int $4\n0:\n"
10339 + _ASM_EXTABLE(0b, 0b)
10340 +#endif
10341 +
10342 + "sets %1\n"
10343 : "+m" (l->a.counter), "=qm" (c)
10344 : "ir" (i) : "memory");
10345 return c;
10346 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10347 #endif
10348 /* Modern 486+ processor */
10349 __i = i;
10350 - asm volatile(_ASM_XADD "%0, %1;"
10351 + asm volatile(_ASM_XADD "%0, %1\n"
10352 +
10353 +#ifdef CONFIG_PAX_REFCOUNT
10354 + "jno 0f\n"
10355 + _ASM_MOV "%0,%1\n"
10356 + "int $4\n0:\n"
10357 + _ASM_EXTABLE(0b, 0b)
10358 +#endif
10359 +
10360 : "+r" (i), "+m" (l->a.counter)
10361 : : "memory");
10362 return i + __i;
10363 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10364 index 593e51d..fa69c9a 100644
10365 --- a/arch/x86/include/asm/mman.h
10366 +++ b/arch/x86/include/asm/mman.h
10367 @@ -5,4 +5,14 @@
10368
10369 #include <asm-generic/mman.h>
10370
10371 +#ifdef __KERNEL__
10372 +#ifndef __ASSEMBLY__
10373 +#ifdef CONFIG_X86_32
10374 +#define arch_mmap_check i386_mmap_check
10375 +int i386_mmap_check(unsigned long addr, unsigned long len,
10376 + unsigned long flags);
10377 +#endif
10378 +#endif
10379 +#endif
10380 +
10381 #endif /* _ASM_X86_MMAN_H */
10382 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10383 index 5f55e69..e20bfb1 100644
10384 --- a/arch/x86/include/asm/mmu.h
10385 +++ b/arch/x86/include/asm/mmu.h
10386 @@ -9,7 +9,7 @@
10387 * we put the segment information here.
10388 */
10389 typedef struct {
10390 - void *ldt;
10391 + struct desc_struct *ldt;
10392 int size;
10393
10394 #ifdef CONFIG_X86_64
10395 @@ -18,7 +18,19 @@ typedef struct {
10396 #endif
10397
10398 struct mutex lock;
10399 - void *vdso;
10400 + unsigned long vdso;
10401 +
10402 +#ifdef CONFIG_X86_32
10403 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10404 + unsigned long user_cs_base;
10405 + unsigned long user_cs_limit;
10406 +
10407 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10408 + cpumask_t cpu_user_cs_mask;
10409 +#endif
10410 +
10411 +#endif
10412 +#endif
10413 } mm_context_t;
10414
10415 #ifdef CONFIG_SMP
10416 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10417 index 6902152..399f3a2 100644
10418 --- a/arch/x86/include/asm/mmu_context.h
10419 +++ b/arch/x86/include/asm/mmu_context.h
10420 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10421
10422 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10423 {
10424 +
10425 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10426 + unsigned int i;
10427 + pgd_t *pgd;
10428 +
10429 + pax_open_kernel();
10430 + pgd = get_cpu_pgd(smp_processor_id());
10431 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10432 + set_pgd_batched(pgd+i, native_make_pgd(0));
10433 + pax_close_kernel();
10434 +#endif
10435 +
10436 #ifdef CONFIG_SMP
10437 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10438 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10439 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10440 struct task_struct *tsk)
10441 {
10442 unsigned cpu = smp_processor_id();
10443 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10444 + int tlbstate = TLBSTATE_OK;
10445 +#endif
10446
10447 if (likely(prev != next)) {
10448 #ifdef CONFIG_SMP
10449 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10450 + tlbstate = percpu_read(cpu_tlbstate.state);
10451 +#endif
10452 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10453 percpu_write(cpu_tlbstate.active_mm, next);
10454 #endif
10455 cpumask_set_cpu(cpu, mm_cpumask(next));
10456
10457 /* Re-load page tables */
10458 +#ifdef CONFIG_PAX_PER_CPU_PGD
10459 + pax_open_kernel();
10460 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10461 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10462 + pax_close_kernel();
10463 + load_cr3(get_cpu_pgd(cpu));
10464 +#else
10465 load_cr3(next->pgd);
10466 +#endif
10467
10468 /* stop flush ipis for the previous mm */
10469 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10470 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10471 */
10472 if (unlikely(prev->context.ldt != next->context.ldt))
10473 load_LDT_nolock(&next->context);
10474 - }
10475 +
10476 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10477 + if (!(__supported_pte_mask & _PAGE_NX)) {
10478 + smp_mb__before_clear_bit();
10479 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10480 + smp_mb__after_clear_bit();
10481 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10482 + }
10483 +#endif
10484 +
10485 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10486 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10487 + prev->context.user_cs_limit != next->context.user_cs_limit))
10488 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10489 #ifdef CONFIG_SMP
10490 + else if (unlikely(tlbstate != TLBSTATE_OK))
10491 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10492 +#endif
10493 +#endif
10494 +
10495 + }
10496 else {
10497 +
10498 +#ifdef CONFIG_PAX_PER_CPU_PGD
10499 + pax_open_kernel();
10500 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10501 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10502 + pax_close_kernel();
10503 + load_cr3(get_cpu_pgd(cpu));
10504 +#endif
10505 +
10506 +#ifdef CONFIG_SMP
10507 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10508 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10509
10510 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10511 * tlb flush IPI delivery. We must reload CR3
10512 * to make sure to use no freed page tables.
10513 */
10514 +
10515 +#ifndef CONFIG_PAX_PER_CPU_PGD
10516 load_cr3(next->pgd);
10517 +#endif
10518 +
10519 load_LDT_nolock(&next->context);
10520 +
10521 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10522 + if (!(__supported_pte_mask & _PAGE_NX))
10523 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10524 +#endif
10525 +
10526 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10527 +#ifdef CONFIG_PAX_PAGEEXEC
10528 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10529 +#endif
10530 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10531 +#endif
10532 +
10533 }
10534 +#endif
10535 }
10536 -#endif
10537 }
10538
10539 #define activate_mm(prev, next) \
10540 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10541 index 9eae775..c914fea 100644
10542 --- a/arch/x86/include/asm/module.h
10543 +++ b/arch/x86/include/asm/module.h
10544 @@ -5,6 +5,7 @@
10545
10546 #ifdef CONFIG_X86_64
10547 /* X86_64 does not define MODULE_PROC_FAMILY */
10548 +#define MODULE_PROC_FAMILY ""
10549 #elif defined CONFIG_M386
10550 #define MODULE_PROC_FAMILY "386 "
10551 #elif defined CONFIG_M486
10552 @@ -59,8 +60,20 @@
10553 #error unknown processor family
10554 #endif
10555
10556 -#ifdef CONFIG_X86_32
10557 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10558 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10559 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10560 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10561 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10562 +#else
10563 +#define MODULE_PAX_KERNEXEC ""
10564 #endif
10565
10566 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10567 +#define MODULE_PAX_UDEREF "UDEREF "
10568 +#else
10569 +#define MODULE_PAX_UDEREF ""
10570 +#endif
10571 +
10572 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10573 +
10574 #endif /* _ASM_X86_MODULE_H */
10575 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10576 index 7639dbf..e08a58c 100644
10577 --- a/arch/x86/include/asm/page_64_types.h
10578 +++ b/arch/x86/include/asm/page_64_types.h
10579 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10580
10581 /* duplicated to the one in bootmem.h */
10582 extern unsigned long max_pfn;
10583 -extern unsigned long phys_base;
10584 +extern const unsigned long phys_base;
10585
10586 extern unsigned long __phys_addr(unsigned long);
10587 #define __phys_reloc_hide(x) (x)
10588 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10589 index a7d2db9..edb023e 100644
10590 --- a/arch/x86/include/asm/paravirt.h
10591 +++ b/arch/x86/include/asm/paravirt.h
10592 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10593 val);
10594 }
10595
10596 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10597 +{
10598 + pgdval_t val = native_pgd_val(pgd);
10599 +
10600 + if (sizeof(pgdval_t) > sizeof(long))
10601 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10602 + val, (u64)val >> 32);
10603 + else
10604 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10605 + val);
10606 +}
10607 +
10608 static inline void pgd_clear(pgd_t *pgdp)
10609 {
10610 set_pgd(pgdp, __pgd(0));
10611 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10612 pv_mmu_ops.set_fixmap(idx, phys, flags);
10613 }
10614
10615 +#ifdef CONFIG_PAX_KERNEXEC
10616 +static inline unsigned long pax_open_kernel(void)
10617 +{
10618 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10619 +}
10620 +
10621 +static inline unsigned long pax_close_kernel(void)
10622 +{
10623 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10624 +}
10625 +#else
10626 +static inline unsigned long pax_open_kernel(void) { return 0; }
10627 +static inline unsigned long pax_close_kernel(void) { return 0; }
10628 +#endif
10629 +
10630 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10631
10632 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10633 @@ -964,7 +991,7 @@ extern void default_banner(void);
10634
10635 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10636 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10637 -#define PARA_INDIRECT(addr) *%cs:addr
10638 +#define PARA_INDIRECT(addr) *%ss:addr
10639 #endif
10640
10641 #define INTERRUPT_RETURN \
10642 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
10643 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10644 CLBR_NONE, \
10645 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10646 +
10647 +#define GET_CR0_INTO_RDI \
10648 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10649 + mov %rax,%rdi
10650 +
10651 +#define SET_RDI_INTO_CR0 \
10652 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10653 +
10654 +#define GET_CR3_INTO_RDI \
10655 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10656 + mov %rax,%rdi
10657 +
10658 +#define SET_RDI_INTO_CR3 \
10659 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10660 +
10661 #endif /* CONFIG_X86_32 */
10662
10663 #endif /* __ASSEMBLY__ */
10664 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10665 index 8e8b9a4..f07d725 100644
10666 --- a/arch/x86/include/asm/paravirt_types.h
10667 +++ b/arch/x86/include/asm/paravirt_types.h
10668 @@ -84,20 +84,20 @@ struct pv_init_ops {
10669 */
10670 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10671 unsigned long addr, unsigned len);
10672 -};
10673 +} __no_const;
10674
10675
10676 struct pv_lazy_ops {
10677 /* Set deferred update mode, used for batching operations. */
10678 void (*enter)(void);
10679 void (*leave)(void);
10680 -};
10681 +} __no_const;
10682
10683 struct pv_time_ops {
10684 unsigned long long (*sched_clock)(void);
10685 unsigned long long (*steal_clock)(int cpu);
10686 unsigned long (*get_tsc_khz)(void);
10687 -};
10688 +} __no_const;
10689
10690 struct pv_cpu_ops {
10691 /* hooks for various privileged instructions */
10692 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
10693
10694 void (*start_context_switch)(struct task_struct *prev);
10695 void (*end_context_switch)(struct task_struct *next);
10696 -};
10697 +} __no_const;
10698
10699 struct pv_irq_ops {
10700 /*
10701 @@ -224,7 +224,7 @@ struct pv_apic_ops {
10702 unsigned long start_eip,
10703 unsigned long start_esp);
10704 #endif
10705 -};
10706 +} __no_const;
10707
10708 struct pv_mmu_ops {
10709 unsigned long (*read_cr2)(void);
10710 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
10711 struct paravirt_callee_save make_pud;
10712
10713 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10714 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10715 #endif /* PAGETABLE_LEVELS == 4 */
10716 #endif /* PAGETABLE_LEVELS >= 3 */
10717
10718 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
10719 an mfn. We can tell which is which from the index. */
10720 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10721 phys_addr_t phys, pgprot_t flags);
10722 +
10723 +#ifdef CONFIG_PAX_KERNEXEC
10724 + unsigned long (*pax_open_kernel)(void);
10725 + unsigned long (*pax_close_kernel)(void);
10726 +#endif
10727 +
10728 };
10729
10730 struct arch_spinlock;
10731 @@ -334,7 +341,7 @@ struct pv_lock_ops {
10732 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
10733 int (*spin_trylock)(struct arch_spinlock *lock);
10734 void (*spin_unlock)(struct arch_spinlock *lock);
10735 -};
10736 +} __no_const;
10737
10738 /* This contains all the paravirt structures: we get a convenient
10739 * number for each function using the offset which we use to indicate
10740 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10741 index b4389a4..b7ff22c 100644
10742 --- a/arch/x86/include/asm/pgalloc.h
10743 +++ b/arch/x86/include/asm/pgalloc.h
10744 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10745 pmd_t *pmd, pte_t *pte)
10746 {
10747 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10748 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10749 +}
10750 +
10751 +static inline void pmd_populate_user(struct mm_struct *mm,
10752 + pmd_t *pmd, pte_t *pte)
10753 +{
10754 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10755 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10756 }
10757
10758 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10759 index 98391db..8f6984e 100644
10760 --- a/arch/x86/include/asm/pgtable-2level.h
10761 +++ b/arch/x86/include/asm/pgtable-2level.h
10762 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10763
10764 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10765 {
10766 + pax_open_kernel();
10767 *pmdp = pmd;
10768 + pax_close_kernel();
10769 }
10770
10771 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10772 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10773 index effff47..f9e4035 100644
10774 --- a/arch/x86/include/asm/pgtable-3level.h
10775 +++ b/arch/x86/include/asm/pgtable-3level.h
10776 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10777
10778 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10779 {
10780 + pax_open_kernel();
10781 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10782 + pax_close_kernel();
10783 }
10784
10785 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10786 {
10787 + pax_open_kernel();
10788 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10789 + pax_close_kernel();
10790 }
10791
10792 /*
10793 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10794 index 18601c8..3d716d1 100644
10795 --- a/arch/x86/include/asm/pgtable.h
10796 +++ b/arch/x86/include/asm/pgtable.h
10797 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10798
10799 #ifndef __PAGETABLE_PUD_FOLDED
10800 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10801 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10802 #define pgd_clear(pgd) native_pgd_clear(pgd)
10803 #endif
10804
10805 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10806
10807 #define arch_end_context_switch(prev) do {} while(0)
10808
10809 +#define pax_open_kernel() native_pax_open_kernel()
10810 +#define pax_close_kernel() native_pax_close_kernel()
10811 #endif /* CONFIG_PARAVIRT */
10812
10813 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
10814 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10815 +
10816 +#ifdef CONFIG_PAX_KERNEXEC
10817 +static inline unsigned long native_pax_open_kernel(void)
10818 +{
10819 + unsigned long cr0;
10820 +
10821 + preempt_disable();
10822 + barrier();
10823 + cr0 = read_cr0() ^ X86_CR0_WP;
10824 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
10825 + write_cr0(cr0);
10826 + return cr0 ^ X86_CR0_WP;
10827 +}
10828 +
10829 +static inline unsigned long native_pax_close_kernel(void)
10830 +{
10831 + unsigned long cr0;
10832 +
10833 + cr0 = read_cr0() ^ X86_CR0_WP;
10834 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10835 + write_cr0(cr0);
10836 + barrier();
10837 + preempt_enable_no_resched();
10838 + return cr0 ^ X86_CR0_WP;
10839 +}
10840 +#else
10841 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
10842 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
10843 +#endif
10844 +
10845 /*
10846 * The following only work if pte_present() is true.
10847 * Undefined behaviour if not..
10848 */
10849 +static inline int pte_user(pte_t pte)
10850 +{
10851 + return pte_val(pte) & _PAGE_USER;
10852 +}
10853 +
10854 static inline int pte_dirty(pte_t pte)
10855 {
10856 return pte_flags(pte) & _PAGE_DIRTY;
10857 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
10858 return pte_clear_flags(pte, _PAGE_RW);
10859 }
10860
10861 +static inline pte_t pte_mkread(pte_t pte)
10862 +{
10863 + return __pte(pte_val(pte) | _PAGE_USER);
10864 +}
10865 +
10866 static inline pte_t pte_mkexec(pte_t pte)
10867 {
10868 - return pte_clear_flags(pte, _PAGE_NX);
10869 +#ifdef CONFIG_X86_PAE
10870 + if (__supported_pte_mask & _PAGE_NX)
10871 + return pte_clear_flags(pte, _PAGE_NX);
10872 + else
10873 +#endif
10874 + return pte_set_flags(pte, _PAGE_USER);
10875 +}
10876 +
10877 +static inline pte_t pte_exprotect(pte_t pte)
10878 +{
10879 +#ifdef CONFIG_X86_PAE
10880 + if (__supported_pte_mask & _PAGE_NX)
10881 + return pte_set_flags(pte, _PAGE_NX);
10882 + else
10883 +#endif
10884 + return pte_clear_flags(pte, _PAGE_USER);
10885 }
10886
10887 static inline pte_t pte_mkdirty(pte_t pte)
10888 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
10889 #endif
10890
10891 #ifndef __ASSEMBLY__
10892 +
10893 +#ifdef CONFIG_PAX_PER_CPU_PGD
10894 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
10895 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
10896 +{
10897 + return cpu_pgd[cpu];
10898 +}
10899 +#endif
10900 +
10901 #include <linux/mm_types.h>
10902
10903 static inline int pte_none(pte_t pte)
10904 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10905
10906 static inline int pgd_bad(pgd_t pgd)
10907 {
10908 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
10909 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
10910 }
10911
10912 static inline int pgd_none(pgd_t pgd)
10913 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
10914 * pgd_offset() returns a (pgd_t *)
10915 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10916 */
10917 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
10918 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
10919 +
10920 +#ifdef CONFIG_PAX_PER_CPU_PGD
10921 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
10922 +#endif
10923 +
10924 /*
10925 * a shortcut which implies the use of the kernel's pgd, instead
10926 * of a process's
10927 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
10928 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10929 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10930
10931 +#ifdef CONFIG_X86_32
10932 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
10933 +#else
10934 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
10935 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
10936 +
10937 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10938 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
10939 +#else
10940 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
10941 +#endif
10942 +
10943 +#endif
10944 +
10945 #ifndef __ASSEMBLY__
10946
10947 extern int direct_gbpages;
10948 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10949 * dst and src can be on the same page, but the range must not overlap,
10950 * and must not cross a page boundary.
10951 */
10952 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
10953 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
10954 {
10955 - memcpy(dst, src, count * sizeof(pgd_t));
10956 + pax_open_kernel();
10957 + while (count--)
10958 + *dst++ = *src++;
10959 + pax_close_kernel();
10960 }
10961
10962 +#ifdef CONFIG_PAX_PER_CPU_PGD
10963 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10964 +#endif
10965 +
10966 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10967 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
10968 +#else
10969 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
10970 +#endif
10971
10972 #include <asm-generic/pgtable.h>
10973 #endif /* __ASSEMBLY__ */
10974 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
10975 index 0c92113..34a77c6 100644
10976 --- a/arch/x86/include/asm/pgtable_32.h
10977 +++ b/arch/x86/include/asm/pgtable_32.h
10978 @@ -25,9 +25,6 @@
10979 struct mm_struct;
10980 struct vm_area_struct;
10981
10982 -extern pgd_t swapper_pg_dir[1024];
10983 -extern pgd_t initial_page_table[1024];
10984 -
10985 static inline void pgtable_cache_init(void) { }
10986 static inline void check_pgt_cache(void) { }
10987 void paging_init(void);
10988 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
10989 # include <asm/pgtable-2level.h>
10990 #endif
10991
10992 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
10993 +extern pgd_t initial_page_table[PTRS_PER_PGD];
10994 +#ifdef CONFIG_X86_PAE
10995 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
10996 +#endif
10997 +
10998 #if defined(CONFIG_HIGHPTE)
10999 #define pte_offset_map(dir, address) \
11000 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11001 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11002 /* Clear a kernel PTE and flush it from the TLB */
11003 #define kpte_clear_flush(ptep, vaddr) \
11004 do { \
11005 + pax_open_kernel(); \
11006 pte_clear(&init_mm, (vaddr), (ptep)); \
11007 + pax_close_kernel(); \
11008 __flush_tlb_one((vaddr)); \
11009 } while (0)
11010
11011 @@ -74,6 +79,9 @@ do { \
11012
11013 #endif /* !__ASSEMBLY__ */
11014
11015 +#define HAVE_ARCH_UNMAPPED_AREA
11016 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11017 +
11018 /*
11019 * kern_addr_valid() is (1) for FLATMEM and (0) for
11020 * SPARSEMEM and DISCONTIGMEM
11021 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11022 index ed5903b..c7fe163 100644
11023 --- a/arch/x86/include/asm/pgtable_32_types.h
11024 +++ b/arch/x86/include/asm/pgtable_32_types.h
11025 @@ -8,7 +8,7 @@
11026 */
11027 #ifdef CONFIG_X86_PAE
11028 # include <asm/pgtable-3level_types.h>
11029 -# define PMD_SIZE (1UL << PMD_SHIFT)
11030 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11031 # define PMD_MASK (~(PMD_SIZE - 1))
11032 #else
11033 # include <asm/pgtable-2level_types.h>
11034 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11035 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11036 #endif
11037
11038 +#ifdef CONFIG_PAX_KERNEXEC
11039 +#ifndef __ASSEMBLY__
11040 +extern unsigned char MODULES_EXEC_VADDR[];
11041 +extern unsigned char MODULES_EXEC_END[];
11042 +#endif
11043 +#include <asm/boot.h>
11044 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11045 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11046 +#else
11047 +#define ktla_ktva(addr) (addr)
11048 +#define ktva_ktla(addr) (addr)
11049 +#endif
11050 +
11051 #define MODULES_VADDR VMALLOC_START
11052 #define MODULES_END VMALLOC_END
11053 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11054 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11055 index 975f709..107976d 100644
11056 --- a/arch/x86/include/asm/pgtable_64.h
11057 +++ b/arch/x86/include/asm/pgtable_64.h
11058 @@ -16,10 +16,14 @@
11059
11060 extern pud_t level3_kernel_pgt[512];
11061 extern pud_t level3_ident_pgt[512];
11062 +extern pud_t level3_vmalloc_start_pgt[512];
11063 +extern pud_t level3_vmalloc_end_pgt[512];
11064 +extern pud_t level3_vmemmap_pgt[512];
11065 +extern pud_t level2_vmemmap_pgt[512];
11066 extern pmd_t level2_kernel_pgt[512];
11067 extern pmd_t level2_fixmap_pgt[512];
11068 -extern pmd_t level2_ident_pgt[512];
11069 -extern pgd_t init_level4_pgt[];
11070 +extern pmd_t level2_ident_pgt[512*2];
11071 +extern pgd_t init_level4_pgt[512];
11072
11073 #define swapper_pg_dir init_level4_pgt
11074
11075 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11076
11077 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11078 {
11079 + pax_open_kernel();
11080 *pmdp = pmd;
11081 + pax_close_kernel();
11082 }
11083
11084 static inline void native_pmd_clear(pmd_t *pmd)
11085 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
11086
11087 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11088 {
11089 + pax_open_kernel();
11090 + *pgdp = pgd;
11091 + pax_close_kernel();
11092 +}
11093 +
11094 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11095 +{
11096 *pgdp = pgd;
11097 }
11098
11099 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11100 index 766ea16..5b96cb3 100644
11101 --- a/arch/x86/include/asm/pgtable_64_types.h
11102 +++ b/arch/x86/include/asm/pgtable_64_types.h
11103 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11104 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11105 #define MODULES_END _AC(0xffffffffff000000, UL)
11106 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11107 +#define MODULES_EXEC_VADDR MODULES_VADDR
11108 +#define MODULES_EXEC_END MODULES_END
11109 +
11110 +#define ktla_ktva(addr) (addr)
11111 +#define ktva_ktla(addr) (addr)
11112
11113 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11114 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11115 index 013286a..8b42f4f 100644
11116 --- a/arch/x86/include/asm/pgtable_types.h
11117 +++ b/arch/x86/include/asm/pgtable_types.h
11118 @@ -16,13 +16,12 @@
11119 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11120 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11121 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11122 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11123 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11124 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11125 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11126 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11127 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11128 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11129 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11130 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11131 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11132 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11133
11134 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11135 @@ -40,7 +39,6 @@
11136 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11137 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11138 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11139 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11140 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11141 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11142 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11143 @@ -57,8 +55,10 @@
11144
11145 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11146 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11147 -#else
11148 +#elif defined(CONFIG_KMEMCHECK)
11149 #define _PAGE_NX (_AT(pteval_t, 0))
11150 +#else
11151 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11152 #endif
11153
11154 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11155 @@ -96,6 +96,9 @@
11156 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11157 _PAGE_ACCESSED)
11158
11159 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11160 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11161 +
11162 #define __PAGE_KERNEL_EXEC \
11163 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11164 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11165 @@ -106,7 +109,7 @@
11166 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11167 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11168 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11169 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11170 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11171 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11172 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11173 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11174 @@ -168,8 +171,8 @@
11175 * bits are combined, this will alow user to access the high address mapped
11176 * VDSO in the presence of CONFIG_COMPAT_VDSO
11177 */
11178 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11179 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11180 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11181 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11182 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11183 #endif
11184
11185 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11186 {
11187 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11188 }
11189 +#endif
11190
11191 +#if PAGETABLE_LEVELS == 3
11192 +#include <asm-generic/pgtable-nopud.h>
11193 +#endif
11194 +
11195 +#if PAGETABLE_LEVELS == 2
11196 +#include <asm-generic/pgtable-nopmd.h>
11197 +#endif
11198 +
11199 +#ifndef __ASSEMBLY__
11200 #if PAGETABLE_LEVELS > 3
11201 typedef struct { pudval_t pud; } pud_t;
11202
11203 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11204 return pud.pud;
11205 }
11206 #else
11207 -#include <asm-generic/pgtable-nopud.h>
11208 -
11209 static inline pudval_t native_pud_val(pud_t pud)
11210 {
11211 return native_pgd_val(pud.pgd);
11212 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11213 return pmd.pmd;
11214 }
11215 #else
11216 -#include <asm-generic/pgtable-nopmd.h>
11217 -
11218 static inline pmdval_t native_pmd_val(pmd_t pmd)
11219 {
11220 return native_pgd_val(pmd.pud.pgd);
11221 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11222
11223 extern pteval_t __supported_pte_mask;
11224 extern void set_nx(void);
11225 -extern int nx_enabled;
11226
11227 #define pgprot_writecombine pgprot_writecombine
11228 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11229 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11230 index bb3ee36..781a6b8 100644
11231 --- a/arch/x86/include/asm/processor.h
11232 +++ b/arch/x86/include/asm/processor.h
11233 @@ -268,7 +268,7 @@ struct tss_struct {
11234
11235 } ____cacheline_aligned;
11236
11237 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11238 +extern struct tss_struct init_tss[NR_CPUS];
11239
11240 /*
11241 * Save the original ist values for checking stack pointers during debugging
11242 @@ -861,11 +861,18 @@ static inline void spin_lock_prefetch(const void *x)
11243 */
11244 #define TASK_SIZE PAGE_OFFSET
11245 #define TASK_SIZE_MAX TASK_SIZE
11246 +
11247 +#ifdef CONFIG_PAX_SEGMEXEC
11248 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11249 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11250 +#else
11251 #define STACK_TOP TASK_SIZE
11252 -#define STACK_TOP_MAX STACK_TOP
11253 +#endif
11254 +
11255 +#define STACK_TOP_MAX TASK_SIZE
11256
11257 #define INIT_THREAD { \
11258 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11259 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11260 .vm86_info = NULL, \
11261 .sysenter_cs = __KERNEL_CS, \
11262 .io_bitmap_ptr = NULL, \
11263 @@ -879,7 +886,7 @@ static inline void spin_lock_prefetch(const void *x)
11264 */
11265 #define INIT_TSS { \
11266 .x86_tss = { \
11267 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11268 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11269 .ss0 = __KERNEL_DS, \
11270 .ss1 = __KERNEL_CS, \
11271 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11272 @@ -890,11 +897,7 @@ static inline void spin_lock_prefetch(const void *x)
11273 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11274
11275 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11276 -#define KSTK_TOP(info) \
11277 -({ \
11278 - unsigned long *__ptr = (unsigned long *)(info); \
11279 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11280 -})
11281 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11282
11283 /*
11284 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11285 @@ -909,7 +912,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11286 #define task_pt_regs(task) \
11287 ({ \
11288 struct pt_regs *__regs__; \
11289 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11290 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11291 __regs__ - 1; \
11292 })
11293
11294 @@ -919,13 +922,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11295 /*
11296 * User space process size. 47bits minus one guard page.
11297 */
11298 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11299 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11300
11301 /* This decides where the kernel will search for a free chunk of vm
11302 * space during mmap's.
11303 */
11304 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11305 - 0xc0000000 : 0xFFFFe000)
11306 + 0xc0000000 : 0xFFFFf000)
11307
11308 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11309 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11310 @@ -936,11 +939,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11311 #define STACK_TOP_MAX TASK_SIZE_MAX
11312
11313 #define INIT_THREAD { \
11314 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11315 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11316 }
11317
11318 #define INIT_TSS { \
11319 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11320 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11321 }
11322
11323 /*
11324 @@ -962,6 +965,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11325 */
11326 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11327
11328 +#ifdef CONFIG_PAX_SEGMEXEC
11329 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11330 +#endif
11331 +
11332 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11333
11334 /* Get/set a process' ability to use the timestamp counter instruction */
11335 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11336 index 3566454..4bdfb8c 100644
11337 --- a/arch/x86/include/asm/ptrace.h
11338 +++ b/arch/x86/include/asm/ptrace.h
11339 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11340 }
11341
11342 /*
11343 - * user_mode_vm(regs) determines whether a register set came from user mode.
11344 + * user_mode(regs) determines whether a register set came from user mode.
11345 * This is true if V8086 mode was enabled OR if the register set was from
11346 * protected mode with RPL-3 CS value. This tricky test checks that with
11347 * one comparison. Many places in the kernel can bypass this full check
11348 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11349 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11350 + * be used.
11351 */
11352 -static inline int user_mode(struct pt_regs *regs)
11353 +static inline int user_mode_novm(struct pt_regs *regs)
11354 {
11355 #ifdef CONFIG_X86_32
11356 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11357 #else
11358 - return !!(regs->cs & 3);
11359 + return !!(regs->cs & SEGMENT_RPL_MASK);
11360 #endif
11361 }
11362
11363 -static inline int user_mode_vm(struct pt_regs *regs)
11364 +static inline int user_mode(struct pt_regs *regs)
11365 {
11366 #ifdef CONFIG_X86_32
11367 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11368 USER_RPL;
11369 #else
11370 - return user_mode(regs);
11371 + return user_mode_novm(regs);
11372 #endif
11373 }
11374
11375 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11376 #ifdef CONFIG_X86_64
11377 static inline bool user_64bit_mode(struct pt_regs *regs)
11378 {
11379 + unsigned long cs = regs->cs & 0xffff;
11380 #ifndef CONFIG_PARAVIRT
11381 /*
11382 * On non-paravirt systems, this is the only long mode CPL 3
11383 * selector. We do not allow long mode selectors in the LDT.
11384 */
11385 - return regs->cs == __USER_CS;
11386 + return cs == __USER_CS;
11387 #else
11388 /* Headers are too twisted for this to go in paravirt.h. */
11389 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11390 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11391 #endif
11392 }
11393 #endif
11394 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11395 index 92f29706..a79cbbb 100644
11396 --- a/arch/x86/include/asm/reboot.h
11397 +++ b/arch/x86/include/asm/reboot.h
11398 @@ -6,19 +6,19 @@
11399 struct pt_regs;
11400
11401 struct machine_ops {
11402 - void (*restart)(char *cmd);
11403 - void (*halt)(void);
11404 - void (*power_off)(void);
11405 + void (* __noreturn restart)(char *cmd);
11406 + void (* __noreturn halt)(void);
11407 + void (* __noreturn power_off)(void);
11408 void (*shutdown)(void);
11409 void (*crash_shutdown)(struct pt_regs *);
11410 - void (*emergency_restart)(void);
11411 -};
11412 + void (* __noreturn emergency_restart)(void);
11413 +} __no_const;
11414
11415 extern struct machine_ops machine_ops;
11416
11417 void native_machine_crash_shutdown(struct pt_regs *regs);
11418 void native_machine_shutdown(void);
11419 -void machine_real_restart(unsigned int type);
11420 +void machine_real_restart(unsigned int type) __noreturn;
11421 /* These must match dispatch_table in reboot_32.S */
11422 #define MRR_BIOS 0
11423 #define MRR_APM 1
11424 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11425 index 2dbe4a7..ce1db00 100644
11426 --- a/arch/x86/include/asm/rwsem.h
11427 +++ b/arch/x86/include/asm/rwsem.h
11428 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11429 {
11430 asm volatile("# beginning down_read\n\t"
11431 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11432 +
11433 +#ifdef CONFIG_PAX_REFCOUNT
11434 + "jno 0f\n"
11435 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11436 + "int $4\n0:\n"
11437 + _ASM_EXTABLE(0b, 0b)
11438 +#endif
11439 +
11440 /* adds 0x00000001 */
11441 " jns 1f\n"
11442 " call call_rwsem_down_read_failed\n"
11443 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11444 "1:\n\t"
11445 " mov %1,%2\n\t"
11446 " add %3,%2\n\t"
11447 +
11448 +#ifdef CONFIG_PAX_REFCOUNT
11449 + "jno 0f\n"
11450 + "sub %3,%2\n"
11451 + "int $4\n0:\n"
11452 + _ASM_EXTABLE(0b, 0b)
11453 +#endif
11454 +
11455 " jle 2f\n\t"
11456 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11457 " jnz 1b\n\t"
11458 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11459 long tmp;
11460 asm volatile("# beginning down_write\n\t"
11461 LOCK_PREFIX " xadd %1,(%2)\n\t"
11462 +
11463 +#ifdef CONFIG_PAX_REFCOUNT
11464 + "jno 0f\n"
11465 + "mov %1,(%2)\n"
11466 + "int $4\n0:\n"
11467 + _ASM_EXTABLE(0b, 0b)
11468 +#endif
11469 +
11470 /* adds 0xffff0001, returns the old value */
11471 " test %1,%1\n\t"
11472 /* was the count 0 before? */
11473 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11474 long tmp;
11475 asm volatile("# beginning __up_read\n\t"
11476 LOCK_PREFIX " xadd %1,(%2)\n\t"
11477 +
11478 +#ifdef CONFIG_PAX_REFCOUNT
11479 + "jno 0f\n"
11480 + "mov %1,(%2)\n"
11481 + "int $4\n0:\n"
11482 + _ASM_EXTABLE(0b, 0b)
11483 +#endif
11484 +
11485 /* subtracts 1, returns the old value */
11486 " jns 1f\n\t"
11487 " call call_rwsem_wake\n" /* expects old value in %edx */
11488 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11489 long tmp;
11490 asm volatile("# beginning __up_write\n\t"
11491 LOCK_PREFIX " xadd %1,(%2)\n\t"
11492 +
11493 +#ifdef CONFIG_PAX_REFCOUNT
11494 + "jno 0f\n"
11495 + "mov %1,(%2)\n"
11496 + "int $4\n0:\n"
11497 + _ASM_EXTABLE(0b, 0b)
11498 +#endif
11499 +
11500 /* subtracts 0xffff0001, returns the old value */
11501 " jns 1f\n\t"
11502 " call call_rwsem_wake\n" /* expects old value in %edx */
11503 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11504 {
11505 asm volatile("# beginning __downgrade_write\n\t"
11506 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11507 +
11508 +#ifdef CONFIG_PAX_REFCOUNT
11509 + "jno 0f\n"
11510 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11511 + "int $4\n0:\n"
11512 + _ASM_EXTABLE(0b, 0b)
11513 +#endif
11514 +
11515 /*
11516 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11517 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11518 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11519 */
11520 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11521 {
11522 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11523 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11524 +
11525 +#ifdef CONFIG_PAX_REFCOUNT
11526 + "jno 0f\n"
11527 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11528 + "int $4\n0:\n"
11529 + _ASM_EXTABLE(0b, 0b)
11530 +#endif
11531 +
11532 : "+m" (sem->count)
11533 : "er" (delta));
11534 }
11535 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11536 */
11537 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
11538 {
11539 - return delta + xadd(&sem->count, delta);
11540 + return delta + xadd_check_overflow(&sem->count, delta);
11541 }
11542
11543 #endif /* __KERNEL__ */
11544 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11545 index 5e64171..f58957e 100644
11546 --- a/arch/x86/include/asm/segment.h
11547 +++ b/arch/x86/include/asm/segment.h
11548 @@ -64,10 +64,15 @@
11549 * 26 - ESPFIX small SS
11550 * 27 - per-cpu [ offset to per-cpu data area ]
11551 * 28 - stack_canary-20 [ for stack protector ]
11552 - * 29 - unused
11553 - * 30 - unused
11554 + * 29 - PCI BIOS CS
11555 + * 30 - PCI BIOS DS
11556 * 31 - TSS for double fault handler
11557 */
11558 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11559 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11560 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11561 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11562 +
11563 #define GDT_ENTRY_TLS_MIN 6
11564 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11565
11566 @@ -79,6 +84,8 @@
11567
11568 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
11569
11570 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11571 +
11572 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
11573
11574 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
11575 @@ -104,6 +111,12 @@
11576 #define __KERNEL_STACK_CANARY 0
11577 #endif
11578
11579 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
11580 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11581 +
11582 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
11583 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11584 +
11585 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11586
11587 /*
11588 @@ -141,7 +154,7 @@
11589 */
11590
11591 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11592 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11593 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11594
11595
11596 #else
11597 @@ -165,6 +178,8 @@
11598 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
11599 #define __USER32_DS __USER_DS
11600
11601 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11602 +
11603 #define GDT_ENTRY_TSS 8 /* needs two entries */
11604 #define GDT_ENTRY_LDT 10 /* needs two entries */
11605 #define GDT_ENTRY_TLS_MIN 12
11606 @@ -185,6 +200,7 @@
11607 #endif
11608
11609 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
11610 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
11611 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
11612 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
11613 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
11614 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11615 index 73b11bc..d4a3b63 100644
11616 --- a/arch/x86/include/asm/smp.h
11617 +++ b/arch/x86/include/asm/smp.h
11618 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11619 /* cpus sharing the last level cache: */
11620 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
11621 DECLARE_PER_CPU(u16, cpu_llc_id);
11622 -DECLARE_PER_CPU(int, cpu_number);
11623 +DECLARE_PER_CPU(unsigned int, cpu_number);
11624
11625 static inline struct cpumask *cpu_sibling_mask(int cpu)
11626 {
11627 @@ -77,7 +77,7 @@ struct smp_ops {
11628
11629 void (*send_call_func_ipi)(const struct cpumask *mask);
11630 void (*send_call_func_single_ipi)(int cpu);
11631 -};
11632 +} __no_const;
11633
11634 /* Globals due to paravirt */
11635 extern void set_cpu_sibling_map(int cpu);
11636 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11637 extern int safe_smp_processor_id(void);
11638
11639 #elif defined(CONFIG_X86_64_SMP)
11640 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11641 -
11642 -#define stack_smp_processor_id() \
11643 -({ \
11644 - struct thread_info *ti; \
11645 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11646 - ti->cpu; \
11647 -})
11648 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11649 +#define stack_smp_processor_id() raw_smp_processor_id()
11650 #define safe_smp_processor_id() smp_processor_id()
11651
11652 #endif
11653 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11654 index 972c260..43ab1fd 100644
11655 --- a/arch/x86/include/asm/spinlock.h
11656 +++ b/arch/x86/include/asm/spinlock.h
11657 @@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
11658 static inline void arch_read_lock(arch_rwlock_t *rw)
11659 {
11660 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
11661 +
11662 +#ifdef CONFIG_PAX_REFCOUNT
11663 + "jno 0f\n"
11664 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
11665 + "int $4\n0:\n"
11666 + _ASM_EXTABLE(0b, 0b)
11667 +#endif
11668 +
11669 "jns 1f\n"
11670 "call __read_lock_failed\n\t"
11671 "1:\n"
11672 @@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
11673 static inline void arch_write_lock(arch_rwlock_t *rw)
11674 {
11675 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
11676 +
11677 +#ifdef CONFIG_PAX_REFCOUNT
11678 + "jno 0f\n"
11679 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
11680 + "int $4\n0:\n"
11681 + _ASM_EXTABLE(0b, 0b)
11682 +#endif
11683 +
11684 "jz 1f\n"
11685 "call __write_lock_failed\n\t"
11686 "1:\n"
11687 @@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
11688
11689 static inline void arch_read_unlock(arch_rwlock_t *rw)
11690 {
11691 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
11692 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
11693 +
11694 +#ifdef CONFIG_PAX_REFCOUNT
11695 + "jno 0f\n"
11696 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
11697 + "int $4\n0:\n"
11698 + _ASM_EXTABLE(0b, 0b)
11699 +#endif
11700 +
11701 :"+m" (rw->lock) : : "memory");
11702 }
11703
11704 static inline void arch_write_unlock(arch_rwlock_t *rw)
11705 {
11706 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
11707 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
11708 +
11709 +#ifdef CONFIG_PAX_REFCOUNT
11710 + "jno 0f\n"
11711 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
11712 + "int $4\n0:\n"
11713 + _ASM_EXTABLE(0b, 0b)
11714 +#endif
11715 +
11716 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
11717 }
11718
11719 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11720 index 1575177..cb23f52 100644
11721 --- a/arch/x86/include/asm/stackprotector.h
11722 +++ b/arch/x86/include/asm/stackprotector.h
11723 @@ -48,7 +48,7 @@
11724 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11725 */
11726 #define GDT_STACK_CANARY_INIT \
11727 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11728 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11729
11730 /*
11731 * Initialize the stackprotector canary value.
11732 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11733
11734 static inline void load_stack_canary_segment(void)
11735 {
11736 -#ifdef CONFIG_X86_32
11737 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11738 asm volatile ("mov %0, %%gs" : : "r" (0));
11739 #endif
11740 }
11741 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
11742 index 70bbe39..4ae2bd4 100644
11743 --- a/arch/x86/include/asm/stacktrace.h
11744 +++ b/arch/x86/include/asm/stacktrace.h
11745 @@ -11,28 +11,20 @@
11746
11747 extern int kstack_depth_to_print;
11748
11749 -struct thread_info;
11750 +struct task_struct;
11751 struct stacktrace_ops;
11752
11753 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
11754 - unsigned long *stack,
11755 - unsigned long bp,
11756 - const struct stacktrace_ops *ops,
11757 - void *data,
11758 - unsigned long *end,
11759 - int *graph);
11760 +typedef unsigned long walk_stack_t(struct task_struct *task,
11761 + void *stack_start,
11762 + unsigned long *stack,
11763 + unsigned long bp,
11764 + const struct stacktrace_ops *ops,
11765 + void *data,
11766 + unsigned long *end,
11767 + int *graph);
11768
11769 -extern unsigned long
11770 -print_context_stack(struct thread_info *tinfo,
11771 - unsigned long *stack, unsigned long bp,
11772 - const struct stacktrace_ops *ops, void *data,
11773 - unsigned long *end, int *graph);
11774 -
11775 -extern unsigned long
11776 -print_context_stack_bp(struct thread_info *tinfo,
11777 - unsigned long *stack, unsigned long bp,
11778 - const struct stacktrace_ops *ops, void *data,
11779 - unsigned long *end, int *graph);
11780 +extern walk_stack_t print_context_stack;
11781 +extern walk_stack_t print_context_stack_bp;
11782
11783 /* Generic stack tracer with callbacks */
11784
11785 @@ -40,7 +32,7 @@ struct stacktrace_ops {
11786 void (*address)(void *data, unsigned long address, int reliable);
11787 /* On negative return stop dumping */
11788 int (*stack)(void *data, char *name);
11789 - walk_stack_t walk_stack;
11790 + walk_stack_t *walk_stack;
11791 };
11792
11793 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
11794 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
11795 index cb23852..2dde194 100644
11796 --- a/arch/x86/include/asm/sys_ia32.h
11797 +++ b/arch/x86/include/asm/sys_ia32.h
11798 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
11799 compat_sigset_t __user *, unsigned int);
11800 asmlinkage long sys32_alarm(unsigned int);
11801
11802 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
11803 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
11804 asmlinkage long sys32_sysfs(int, u32, u32);
11805
11806 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
11807 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11808 index 2d2f01c..f985723 100644
11809 --- a/arch/x86/include/asm/system.h
11810 +++ b/arch/x86/include/asm/system.h
11811 @@ -129,7 +129,7 @@ do { \
11812 "call __switch_to\n\t" \
11813 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11814 __switch_canary \
11815 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
11816 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11817 "movq %%rax,%%rdi\n\t" \
11818 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11819 "jnz ret_from_fork\n\t" \
11820 @@ -140,7 +140,7 @@ do { \
11821 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11822 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11823 [_tif_fork] "i" (_TIF_FORK), \
11824 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
11825 + [thread_info] "m" (current_tinfo), \
11826 [current_task] "m" (current_task) \
11827 __switch_canary_iparam \
11828 : "memory", "cc" __EXTRA_CLOBBER)
11829 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11830 {
11831 unsigned long __limit;
11832 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11833 - return __limit + 1;
11834 + return __limit;
11835 }
11836
11837 static inline void native_clts(void)
11838 @@ -397,13 +397,13 @@ void enable_hlt(void);
11839
11840 void cpu_idle_wait(void);
11841
11842 -extern unsigned long arch_align_stack(unsigned long sp);
11843 +#define arch_align_stack(x) ((x) & ~0xfUL)
11844 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11845
11846 void default_idle(void);
11847 bool set_pm_idle_to_default(void);
11848
11849 -void stop_this_cpu(void *dummy);
11850 +void stop_this_cpu(void *dummy) __noreturn;
11851
11852 /*
11853 * Force strict CPU ordering.
11854 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11855 index d7ef849..6af292e 100644
11856 --- a/arch/x86/include/asm/thread_info.h
11857 +++ b/arch/x86/include/asm/thread_info.h
11858 @@ -10,6 +10,7 @@
11859 #include <linux/compiler.h>
11860 #include <asm/page.h>
11861 #include <asm/types.h>
11862 +#include <asm/percpu.h>
11863
11864 /*
11865 * low level task data that entry.S needs immediate access to
11866 @@ -24,7 +25,6 @@ struct exec_domain;
11867 #include <linux/atomic.h>
11868
11869 struct thread_info {
11870 - struct task_struct *task; /* main task structure */
11871 struct exec_domain *exec_domain; /* execution domain */
11872 __u32 flags; /* low level flags */
11873 __u32 status; /* thread synchronous flags */
11874 @@ -34,18 +34,12 @@ struct thread_info {
11875 mm_segment_t addr_limit;
11876 struct restart_block restart_block;
11877 void __user *sysenter_return;
11878 -#ifdef CONFIG_X86_32
11879 - unsigned long previous_esp; /* ESP of the previous stack in
11880 - case of nested (IRQ) stacks
11881 - */
11882 - __u8 supervisor_stack[0];
11883 -#endif
11884 + unsigned long lowest_stack;
11885 int uaccess_err;
11886 };
11887
11888 -#define INIT_THREAD_INFO(tsk) \
11889 +#define INIT_THREAD_INFO \
11890 { \
11891 - .task = &tsk, \
11892 .exec_domain = &default_exec_domain, \
11893 .flags = 0, \
11894 .cpu = 0, \
11895 @@ -56,7 +50,7 @@ struct thread_info {
11896 }, \
11897 }
11898
11899 -#define init_thread_info (init_thread_union.thread_info)
11900 +#define init_thread_info (init_thread_union.stack)
11901 #define init_stack (init_thread_union.stack)
11902
11903 #else /* !__ASSEMBLY__ */
11904 @@ -170,45 +164,40 @@ struct thread_info {
11905 ret; \
11906 })
11907
11908 -#ifdef CONFIG_X86_32
11909 -
11910 -#define STACK_WARN (THREAD_SIZE/8)
11911 -/*
11912 - * macros/functions for gaining access to the thread information structure
11913 - *
11914 - * preempt_count needs to be 1 initially, until the scheduler is functional.
11915 - */
11916 -#ifndef __ASSEMBLY__
11917 -
11918 -
11919 -/* how to get the current stack pointer from C */
11920 -register unsigned long current_stack_pointer asm("esp") __used;
11921 -
11922 -/* how to get the thread information struct from C */
11923 -static inline struct thread_info *current_thread_info(void)
11924 -{
11925 - return (struct thread_info *)
11926 - (current_stack_pointer & ~(THREAD_SIZE - 1));
11927 -}
11928 -
11929 -#else /* !__ASSEMBLY__ */
11930 -
11931 +#ifdef __ASSEMBLY__
11932 /* how to get the thread information struct from ASM */
11933 #define GET_THREAD_INFO(reg) \
11934 - movl $-THREAD_SIZE, reg; \
11935 - andl %esp, reg
11936 + mov PER_CPU_VAR(current_tinfo), reg
11937
11938 /* use this one if reg already contains %esp */
11939 -#define GET_THREAD_INFO_WITH_ESP(reg) \
11940 - andl $-THREAD_SIZE, reg
11941 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
11942 +#else
11943 +/* how to get the thread information struct from C */
11944 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
11945 +
11946 +static __always_inline struct thread_info *current_thread_info(void)
11947 +{
11948 + return percpu_read_stable(current_tinfo);
11949 +}
11950 +#endif
11951 +
11952 +#ifdef CONFIG_X86_32
11953 +
11954 +#define STACK_WARN (THREAD_SIZE/8)
11955 +/*
11956 + * macros/functions for gaining access to the thread information structure
11957 + *
11958 + * preempt_count needs to be 1 initially, until the scheduler is functional.
11959 + */
11960 +#ifndef __ASSEMBLY__
11961 +
11962 +/* how to get the current stack pointer from C */
11963 +register unsigned long current_stack_pointer asm("esp") __used;
11964
11965 #endif
11966
11967 #else /* X86_32 */
11968
11969 -#include <asm/percpu.h>
11970 -#define KERNEL_STACK_OFFSET (5*8)
11971 -
11972 /*
11973 * macros/functions for gaining access to the thread information structure
11974 * preempt_count needs to be 1 initially, until the scheduler is functional.
11975 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
11976 #ifndef __ASSEMBLY__
11977 DECLARE_PER_CPU(unsigned long, kernel_stack);
11978
11979 -static inline struct thread_info *current_thread_info(void)
11980 -{
11981 - struct thread_info *ti;
11982 - ti = (void *)(percpu_read_stable(kernel_stack) +
11983 - KERNEL_STACK_OFFSET - THREAD_SIZE);
11984 - return ti;
11985 -}
11986 -
11987 -#else /* !__ASSEMBLY__ */
11988 -
11989 -/* how to get the thread information struct from ASM */
11990 -#define GET_THREAD_INFO(reg) \
11991 - movq PER_CPU_VAR(kernel_stack),reg ; \
11992 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
11993 -
11994 +/* how to get the current stack pointer from C */
11995 +register unsigned long current_stack_pointer asm("rsp") __used;
11996 #endif
11997
11998 #endif /* !X86_32 */
11999 @@ -264,5 +240,16 @@ extern void arch_task_cache_init(void);
12000 extern void free_thread_info(struct thread_info *ti);
12001 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12002 #define arch_task_cache_init arch_task_cache_init
12003 +
12004 +#define __HAVE_THREAD_FUNCTIONS
12005 +#define task_thread_info(task) (&(task)->tinfo)
12006 +#define task_stack_page(task) ((task)->stack)
12007 +#define setup_thread_stack(p, org) do {} while (0)
12008 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12009 +
12010 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12011 +extern struct task_struct *alloc_task_struct_node(int node);
12012 +extern void free_task_struct(struct task_struct *);
12013 +
12014 #endif
12015 #endif /* _ASM_X86_THREAD_INFO_H */
12016 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12017 index 36361bf..324f262 100644
12018 --- a/arch/x86/include/asm/uaccess.h
12019 +++ b/arch/x86/include/asm/uaccess.h
12020 @@ -7,12 +7,15 @@
12021 #include <linux/compiler.h>
12022 #include <linux/thread_info.h>
12023 #include <linux/string.h>
12024 +#include <linux/sched.h>
12025 #include <asm/asm.h>
12026 #include <asm/page.h>
12027
12028 #define VERIFY_READ 0
12029 #define VERIFY_WRITE 1
12030
12031 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12032 +
12033 /*
12034 * The fs value determines whether argument validity checking should be
12035 * performed or not. If get_fs() == USER_DS, checking is performed, with
12036 @@ -28,7 +31,12 @@
12037
12038 #define get_ds() (KERNEL_DS)
12039 #define get_fs() (current_thread_info()->addr_limit)
12040 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12041 +void __set_fs(mm_segment_t x);
12042 +void set_fs(mm_segment_t x);
12043 +#else
12044 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12045 +#endif
12046
12047 #define segment_eq(a, b) ((a).seg == (b).seg)
12048
12049 @@ -76,7 +84,33 @@
12050 * checks that the pointer is in the user space range - after calling
12051 * this function, memory access functions may still return -EFAULT.
12052 */
12053 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12054 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12055 +#define access_ok(type, addr, size) \
12056 +({ \
12057 + long __size = size; \
12058 + unsigned long __addr = (unsigned long)addr; \
12059 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12060 + unsigned long __end_ao = __addr + __size - 1; \
12061 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12062 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12063 + while(__addr_ao <= __end_ao) { \
12064 + char __c_ao; \
12065 + __addr_ao += PAGE_SIZE; \
12066 + if (__size > PAGE_SIZE) \
12067 + cond_resched(); \
12068 + if (__get_user(__c_ao, (char __user *)__addr)) \
12069 + break; \
12070 + if (type != VERIFY_WRITE) { \
12071 + __addr = __addr_ao; \
12072 + continue; \
12073 + } \
12074 + if (__put_user(__c_ao, (char __user *)__addr)) \
12075 + break; \
12076 + __addr = __addr_ao; \
12077 + } \
12078 + } \
12079 + __ret_ao; \
12080 +})
12081
12082 /*
12083 * The exception table consists of pairs of addresses: the first is the
12084 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12085 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12086 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12087
12088 -
12089 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12090 +#define __copyuser_seg "gs;"
12091 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12092 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12093 +#else
12094 +#define __copyuser_seg
12095 +#define __COPYUSER_SET_ES
12096 +#define __COPYUSER_RESTORE_ES
12097 +#endif
12098
12099 #ifdef CONFIG_X86_32
12100 #define __put_user_asm_u64(x, addr, err, errret) \
12101 - asm volatile("1: movl %%eax,0(%2)\n" \
12102 - "2: movl %%edx,4(%2)\n" \
12103 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12104 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12105 "3:\n" \
12106 ".section .fixup,\"ax\"\n" \
12107 "4: movl %3,%0\n" \
12108 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12109 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12110
12111 #define __put_user_asm_ex_u64(x, addr) \
12112 - asm volatile("1: movl %%eax,0(%1)\n" \
12113 - "2: movl %%edx,4(%1)\n" \
12114 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12115 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12116 "3:\n" \
12117 _ASM_EXTABLE(1b, 2b - 1b) \
12118 _ASM_EXTABLE(2b, 3b - 2b) \
12119 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12120 __typeof__(*(ptr)) __pu_val; \
12121 __chk_user_ptr(ptr); \
12122 might_fault(); \
12123 - __pu_val = x; \
12124 + __pu_val = (x); \
12125 switch (sizeof(*(ptr))) { \
12126 case 1: \
12127 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12128 @@ -373,7 +415,7 @@ do { \
12129 } while (0)
12130
12131 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12132 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12133 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12134 "2:\n" \
12135 ".section .fixup,\"ax\"\n" \
12136 "3: mov %3,%0\n" \
12137 @@ -381,7 +423,7 @@ do { \
12138 " jmp 2b\n" \
12139 ".previous\n" \
12140 _ASM_EXTABLE(1b, 3b) \
12141 - : "=r" (err), ltype(x) \
12142 + : "=r" (err), ltype (x) \
12143 : "m" (__m(addr)), "i" (errret), "0" (err))
12144
12145 #define __get_user_size_ex(x, ptr, size) \
12146 @@ -406,7 +448,7 @@ do { \
12147 } while (0)
12148
12149 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12150 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12151 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12152 "2:\n" \
12153 _ASM_EXTABLE(1b, 2b - 1b) \
12154 : ltype(x) : "m" (__m(addr)))
12155 @@ -423,13 +465,24 @@ do { \
12156 int __gu_err; \
12157 unsigned long __gu_val; \
12158 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12159 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12160 + (x) = (__typeof__(*(ptr)))__gu_val; \
12161 __gu_err; \
12162 })
12163
12164 /* FIXME: this hack is definitely wrong -AK */
12165 struct __large_struct { unsigned long buf[100]; };
12166 -#define __m(x) (*(struct __large_struct __user *)(x))
12167 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12168 +#define ____m(x) \
12169 +({ \
12170 + unsigned long ____x = (unsigned long)(x); \
12171 + if (____x < PAX_USER_SHADOW_BASE) \
12172 + ____x += PAX_USER_SHADOW_BASE; \
12173 + (void __user *)____x; \
12174 +})
12175 +#else
12176 +#define ____m(x) (x)
12177 +#endif
12178 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12179
12180 /*
12181 * Tell gcc we read from memory instead of writing: this is because
12182 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12183 * aliasing issues.
12184 */
12185 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12186 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12187 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12188 "2:\n" \
12189 ".section .fixup,\"ax\"\n" \
12190 "3: mov %3,%0\n" \
12191 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12192 ".previous\n" \
12193 _ASM_EXTABLE(1b, 3b) \
12194 : "=r"(err) \
12195 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12196 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12197
12198 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12199 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12200 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12201 "2:\n" \
12202 _ASM_EXTABLE(1b, 2b - 1b) \
12203 : : ltype(x), "m" (__m(addr)))
12204 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12205 * On error, the variable @x is set to zero.
12206 */
12207
12208 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12209 +#define __get_user(x, ptr) get_user((x), (ptr))
12210 +#else
12211 #define __get_user(x, ptr) \
12212 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12213 +#endif
12214
12215 /**
12216 * __put_user: - Write a simple value into user space, with less checking.
12217 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12218 * Returns zero on success, or -EFAULT on error.
12219 */
12220
12221 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12222 +#define __put_user(x, ptr) put_user((x), (ptr))
12223 +#else
12224 #define __put_user(x, ptr) \
12225 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12226 +#endif
12227
12228 #define __get_user_unaligned __get_user
12229 #define __put_user_unaligned __put_user
12230 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12231 #define get_user_ex(x, ptr) do { \
12232 unsigned long __gue_val; \
12233 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12234 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12235 + (x) = (__typeof__(*(ptr)))__gue_val; \
12236 } while (0)
12237
12238 #ifdef CONFIG_X86_WP_WORKS_OK
12239 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12240 index 566e803..b9521e9 100644
12241 --- a/arch/x86/include/asm/uaccess_32.h
12242 +++ b/arch/x86/include/asm/uaccess_32.h
12243 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12244 static __always_inline unsigned long __must_check
12245 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12246 {
12247 + if ((long)n < 0)
12248 + return n;
12249 +
12250 if (__builtin_constant_p(n)) {
12251 unsigned long ret;
12252
12253 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12254 return ret;
12255 }
12256 }
12257 + if (!__builtin_constant_p(n))
12258 + check_object_size(from, n, true);
12259 return __copy_to_user_ll(to, from, n);
12260 }
12261
12262 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12263 __copy_to_user(void __user *to, const void *from, unsigned long n)
12264 {
12265 might_fault();
12266 +
12267 return __copy_to_user_inatomic(to, from, n);
12268 }
12269
12270 static __always_inline unsigned long
12271 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12272 {
12273 + if ((long)n < 0)
12274 + return n;
12275 +
12276 /* Avoid zeroing the tail if the copy fails..
12277 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12278 * but as the zeroing behaviour is only significant when n is not
12279 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12280 __copy_from_user(void *to, const void __user *from, unsigned long n)
12281 {
12282 might_fault();
12283 +
12284 + if ((long)n < 0)
12285 + return n;
12286 +
12287 if (__builtin_constant_p(n)) {
12288 unsigned long ret;
12289
12290 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12291 return ret;
12292 }
12293 }
12294 + if (!__builtin_constant_p(n))
12295 + check_object_size(to, n, false);
12296 return __copy_from_user_ll(to, from, n);
12297 }
12298
12299 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12300 const void __user *from, unsigned long n)
12301 {
12302 might_fault();
12303 +
12304 + if ((long)n < 0)
12305 + return n;
12306 +
12307 if (__builtin_constant_p(n)) {
12308 unsigned long ret;
12309
12310 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12311 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12312 unsigned long n)
12313 {
12314 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12315 + if ((long)n < 0)
12316 + return n;
12317 +
12318 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12319 }
12320
12321 -unsigned long __must_check copy_to_user(void __user *to,
12322 - const void *from, unsigned long n);
12323 -unsigned long __must_check _copy_from_user(void *to,
12324 - const void __user *from,
12325 - unsigned long n);
12326 -
12327 +extern void copy_to_user_overflow(void)
12328 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12329 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12330 +#else
12331 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12332 +#endif
12333 +;
12334
12335 extern void copy_from_user_overflow(void)
12336 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12337 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12338 #endif
12339 ;
12340
12341 -static inline unsigned long __must_check copy_from_user(void *to,
12342 - const void __user *from,
12343 - unsigned long n)
12344 +/**
12345 + * copy_to_user: - Copy a block of data into user space.
12346 + * @to: Destination address, in user space.
12347 + * @from: Source address, in kernel space.
12348 + * @n: Number of bytes to copy.
12349 + *
12350 + * Context: User context only. This function may sleep.
12351 + *
12352 + * Copy data from kernel space to user space.
12353 + *
12354 + * Returns number of bytes that could not be copied.
12355 + * On success, this will be zero.
12356 + */
12357 +static inline unsigned long __must_check
12358 +copy_to_user(void __user *to, const void *from, unsigned long n)
12359 +{
12360 + int sz = __compiletime_object_size(from);
12361 +
12362 + if (unlikely(sz != -1 && sz < n))
12363 + copy_to_user_overflow();
12364 + else if (access_ok(VERIFY_WRITE, to, n))
12365 + n = __copy_to_user(to, from, n);
12366 + return n;
12367 +}
12368 +
12369 +/**
12370 + * copy_from_user: - Copy a block of data from user space.
12371 + * @to: Destination address, in kernel space.
12372 + * @from: Source address, in user space.
12373 + * @n: Number of bytes to copy.
12374 + *
12375 + * Context: User context only. This function may sleep.
12376 + *
12377 + * Copy data from user space to kernel space.
12378 + *
12379 + * Returns number of bytes that could not be copied.
12380 + * On success, this will be zero.
12381 + *
12382 + * If some data could not be copied, this function will pad the copied
12383 + * data to the requested size using zero bytes.
12384 + */
12385 +static inline unsigned long __must_check
12386 +copy_from_user(void *to, const void __user *from, unsigned long n)
12387 {
12388 int sz = __compiletime_object_size(to);
12389
12390 - if (likely(sz == -1 || sz >= n))
12391 - n = _copy_from_user(to, from, n);
12392 - else
12393 + if (unlikely(sz != -1 && sz < n))
12394 copy_from_user_overflow();
12395 -
12396 + else if (access_ok(VERIFY_READ, from, n))
12397 + n = __copy_from_user(to, from, n);
12398 + else if ((long)n > 0) {
12399 + if (!__builtin_constant_p(n))
12400 + check_object_size(to, n, false);
12401 + memset(to, 0, n);
12402 + }
12403 return n;
12404 }
12405
12406 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12407 index 1c66d30..e66922c 100644
12408 --- a/arch/x86/include/asm/uaccess_64.h
12409 +++ b/arch/x86/include/asm/uaccess_64.h
12410 @@ -10,6 +10,9 @@
12411 #include <asm/alternative.h>
12412 #include <asm/cpufeature.h>
12413 #include <asm/page.h>
12414 +#include <asm/pgtable.h>
12415 +
12416 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12417
12418 /*
12419 * Copy To/From Userspace
12420 @@ -17,12 +20,12 @@
12421
12422 /* Handles exceptions in both to and from, but doesn't do access_ok */
12423 __must_check unsigned long
12424 -copy_user_generic_string(void *to, const void *from, unsigned len);
12425 +copy_user_generic_string(void *to, const void *from, unsigned long len);
12426 __must_check unsigned long
12427 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
12428 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
12429
12430 static __always_inline __must_check unsigned long
12431 -copy_user_generic(void *to, const void *from, unsigned len)
12432 +copy_user_generic(void *to, const void *from, unsigned long len)
12433 {
12434 unsigned ret;
12435
12436 @@ -32,142 +35,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
12437 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
12438 "=d" (len)),
12439 "1" (to), "2" (from), "3" (len)
12440 - : "memory", "rcx", "r8", "r9", "r10", "r11");
12441 + : "memory", "rcx", "r8", "r9", "r11");
12442 return ret;
12443 }
12444
12445 +static __always_inline __must_check unsigned long
12446 +__copy_to_user(void __user *to, const void *from, unsigned long len);
12447 +static __always_inline __must_check unsigned long
12448 +__copy_from_user(void *to, const void __user *from, unsigned long len);
12449 __must_check unsigned long
12450 -_copy_to_user(void __user *to, const void *from, unsigned len);
12451 -__must_check unsigned long
12452 -_copy_from_user(void *to, const void __user *from, unsigned len);
12453 -__must_check unsigned long
12454 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12455 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
12456
12457 static inline unsigned long __must_check copy_from_user(void *to,
12458 const void __user *from,
12459 unsigned long n)
12460 {
12461 - int sz = __compiletime_object_size(to);
12462 -
12463 might_fault();
12464 - if (likely(sz == -1 || sz >= n))
12465 - n = _copy_from_user(to, from, n);
12466 -#ifdef CONFIG_DEBUG_VM
12467 - else
12468 - WARN(1, "Buffer overflow detected!\n");
12469 -#endif
12470 +
12471 + if (access_ok(VERIFY_READ, from, n))
12472 + n = __copy_from_user(to, from, n);
12473 + else if (n < INT_MAX) {
12474 + if (!__builtin_constant_p(n))
12475 + check_object_size(to, n, false);
12476 + memset(to, 0, n);
12477 + }
12478 return n;
12479 }
12480
12481 static __always_inline __must_check
12482 -int copy_to_user(void __user *dst, const void *src, unsigned size)
12483 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
12484 {
12485 might_fault();
12486
12487 - return _copy_to_user(dst, src, size);
12488 + if (access_ok(VERIFY_WRITE, dst, size))
12489 + size = __copy_to_user(dst, src, size);
12490 + return size;
12491 }
12492
12493 static __always_inline __must_check
12494 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12495 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12496 {
12497 - int ret = 0;
12498 + int sz = __compiletime_object_size(dst);
12499 + unsigned ret = 0;
12500
12501 might_fault();
12502 - if (!__builtin_constant_p(size))
12503 - return copy_user_generic(dst, (__force void *)src, size);
12504 +
12505 + if (size > INT_MAX)
12506 + return size;
12507 +
12508 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12509 + if (!__access_ok(VERIFY_READ, src, size))
12510 + return size;
12511 +#endif
12512 +
12513 + if (unlikely(sz != -1 && sz < size)) {
12514 +#ifdef CONFIG_DEBUG_VM
12515 + WARN(1, "Buffer overflow detected!\n");
12516 +#endif
12517 + return size;
12518 + }
12519 +
12520 + if (!__builtin_constant_p(size)) {
12521 + check_object_size(dst, size, false);
12522 +
12523 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12524 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12525 + src += PAX_USER_SHADOW_BASE;
12526 +#endif
12527 +
12528 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12529 + }
12530 switch (size) {
12531 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12532 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12533 ret, "b", "b", "=q", 1);
12534 return ret;
12535 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12536 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12537 ret, "w", "w", "=r", 2);
12538 return ret;
12539 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12540 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12541 ret, "l", "k", "=r", 4);
12542 return ret;
12543 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12544 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12545 ret, "q", "", "=r", 8);
12546 return ret;
12547 case 10:
12548 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12549 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12550 ret, "q", "", "=r", 10);
12551 if (unlikely(ret))
12552 return ret;
12553 __get_user_asm(*(u16 *)(8 + (char *)dst),
12554 - (u16 __user *)(8 + (char __user *)src),
12555 + (const u16 __user *)(8 + (const char __user *)src),
12556 ret, "w", "w", "=r", 2);
12557 return ret;
12558 case 16:
12559 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12560 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12561 ret, "q", "", "=r", 16);
12562 if (unlikely(ret))
12563 return ret;
12564 __get_user_asm(*(u64 *)(8 + (char *)dst),
12565 - (u64 __user *)(8 + (char __user *)src),
12566 + (const u64 __user *)(8 + (const char __user *)src),
12567 ret, "q", "", "=r", 8);
12568 return ret;
12569 default:
12570 - return copy_user_generic(dst, (__force void *)src, size);
12571 +
12572 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12573 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12574 + src += PAX_USER_SHADOW_BASE;
12575 +#endif
12576 +
12577 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12578 }
12579 }
12580
12581 static __always_inline __must_check
12582 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12583 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12584 {
12585 - int ret = 0;
12586 + int sz = __compiletime_object_size(src);
12587 + unsigned ret = 0;
12588
12589 might_fault();
12590 - if (!__builtin_constant_p(size))
12591 - return copy_user_generic((__force void *)dst, src, size);
12592 +
12593 + if (size > INT_MAX)
12594 + return size;
12595 +
12596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12597 + if (!__access_ok(VERIFY_WRITE, dst, size))
12598 + return size;
12599 +#endif
12600 +
12601 + if (unlikely(sz != -1 && sz < size)) {
12602 +#ifdef CONFIG_DEBUG_VM
12603 + WARN(1, "Buffer overflow detected!\n");
12604 +#endif
12605 + return size;
12606 + }
12607 +
12608 + if (!__builtin_constant_p(size)) {
12609 + check_object_size(src, size, true);
12610 +
12611 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12612 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12613 + dst += PAX_USER_SHADOW_BASE;
12614 +#endif
12615 +
12616 + return copy_user_generic((__force_kernel void *)dst, src, size);
12617 + }
12618 switch (size) {
12619 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12620 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12621 ret, "b", "b", "iq", 1);
12622 return ret;
12623 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12624 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12625 ret, "w", "w", "ir", 2);
12626 return ret;
12627 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12628 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12629 ret, "l", "k", "ir", 4);
12630 return ret;
12631 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12632 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12633 ret, "q", "", "er", 8);
12634 return ret;
12635 case 10:
12636 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12637 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12638 ret, "q", "", "er", 10);
12639 if (unlikely(ret))
12640 return ret;
12641 asm("":::"memory");
12642 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12643 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12644 ret, "w", "w", "ir", 2);
12645 return ret;
12646 case 16:
12647 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12648 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12649 ret, "q", "", "er", 16);
12650 if (unlikely(ret))
12651 return ret;
12652 asm("":::"memory");
12653 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12654 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12655 ret, "q", "", "er", 8);
12656 return ret;
12657 default:
12658 - return copy_user_generic((__force void *)dst, src, size);
12659 +
12660 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12661 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12662 + dst += PAX_USER_SHADOW_BASE;
12663 +#endif
12664 +
12665 + return copy_user_generic((__force_kernel void *)dst, src, size);
12666 }
12667 }
12668
12669 static __always_inline __must_check
12670 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12671 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12672 {
12673 - int ret = 0;
12674 + unsigned ret = 0;
12675
12676 might_fault();
12677 - if (!__builtin_constant_p(size))
12678 - return copy_user_generic((__force void *)dst,
12679 - (__force void *)src, size);
12680 +
12681 + if (size > INT_MAX)
12682 + return size;
12683 +
12684 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12685 + if (!__access_ok(VERIFY_READ, src, size))
12686 + return size;
12687 + if (!__access_ok(VERIFY_WRITE, dst, size))
12688 + return size;
12689 +#endif
12690 +
12691 + if (!__builtin_constant_p(size)) {
12692 +
12693 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12694 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12695 + src += PAX_USER_SHADOW_BASE;
12696 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12697 + dst += PAX_USER_SHADOW_BASE;
12698 +#endif
12699 +
12700 + return copy_user_generic((__force_kernel void *)dst,
12701 + (__force_kernel const void *)src, size);
12702 + }
12703 switch (size) {
12704 case 1: {
12705 u8 tmp;
12706 - __get_user_asm(tmp, (u8 __user *)src,
12707 + __get_user_asm(tmp, (const u8 __user *)src,
12708 ret, "b", "b", "=q", 1);
12709 if (likely(!ret))
12710 __put_user_asm(tmp, (u8 __user *)dst,
12711 @@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12712 }
12713 case 2: {
12714 u16 tmp;
12715 - __get_user_asm(tmp, (u16 __user *)src,
12716 + __get_user_asm(tmp, (const u16 __user *)src,
12717 ret, "w", "w", "=r", 2);
12718 if (likely(!ret))
12719 __put_user_asm(tmp, (u16 __user *)dst,
12720 @@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12721
12722 case 4: {
12723 u32 tmp;
12724 - __get_user_asm(tmp, (u32 __user *)src,
12725 + __get_user_asm(tmp, (const u32 __user *)src,
12726 ret, "l", "k", "=r", 4);
12727 if (likely(!ret))
12728 __put_user_asm(tmp, (u32 __user *)dst,
12729 @@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12730 }
12731 case 8: {
12732 u64 tmp;
12733 - __get_user_asm(tmp, (u64 __user *)src,
12734 + __get_user_asm(tmp, (const u64 __user *)src,
12735 ret, "q", "", "=r", 8);
12736 if (likely(!ret))
12737 __put_user_asm(tmp, (u64 __user *)dst,
12738 @@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12739 return ret;
12740 }
12741 default:
12742 - return copy_user_generic((__force void *)dst,
12743 - (__force void *)src, size);
12744 +
12745 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12746 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12747 + src += PAX_USER_SHADOW_BASE;
12748 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12749 + dst += PAX_USER_SHADOW_BASE;
12750 +#endif
12751 +
12752 + return copy_user_generic((__force_kernel void *)dst,
12753 + (__force_kernel const void *)src, size);
12754 }
12755 }
12756
12757 @@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12758 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12759
12760 static __must_check __always_inline int
12761 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12762 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12763 {
12764 - return copy_user_generic(dst, (__force const void *)src, size);
12765 + if (size > INT_MAX)
12766 + return size;
12767 +
12768 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12769 + if (!__access_ok(VERIFY_READ, src, size))
12770 + return size;
12771 +
12772 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12773 + src += PAX_USER_SHADOW_BASE;
12774 +#endif
12775 +
12776 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12777 }
12778
12779 -static __must_check __always_inline int
12780 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12781 +static __must_check __always_inline unsigned long
12782 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12783 {
12784 - return copy_user_generic((__force void *)dst, src, size);
12785 + if (size > INT_MAX)
12786 + return size;
12787 +
12788 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12789 + if (!__access_ok(VERIFY_WRITE, dst, size))
12790 + return size;
12791 +
12792 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12793 + dst += PAX_USER_SHADOW_BASE;
12794 +#endif
12795 +
12796 + return copy_user_generic((__force_kernel void *)dst, src, size);
12797 }
12798
12799 -extern long __copy_user_nocache(void *dst, const void __user *src,
12800 - unsigned size, int zerorest);
12801 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12802 + unsigned long size, int zerorest);
12803
12804 -static inline int
12805 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12806 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12807 {
12808 might_sleep();
12809 +
12810 + if (size > INT_MAX)
12811 + return size;
12812 +
12813 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12814 + if (!__access_ok(VERIFY_READ, src, size))
12815 + return size;
12816 +#endif
12817 +
12818 return __copy_user_nocache(dst, src, size, 1);
12819 }
12820
12821 -static inline int
12822 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12823 - unsigned size)
12824 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12825 + unsigned long size)
12826 {
12827 + if (size > INT_MAX)
12828 + return size;
12829 +
12830 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12831 + if (!__access_ok(VERIFY_READ, src, size))
12832 + return size;
12833 +#endif
12834 +
12835 return __copy_user_nocache(dst, src, size, 0);
12836 }
12837
12838 -unsigned long
12839 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12840 +extern unsigned long
12841 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12842
12843 #endif /* _ASM_X86_UACCESS_64_H */
12844 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12845 index bb05228..d763d5b 100644
12846 --- a/arch/x86/include/asm/vdso.h
12847 +++ b/arch/x86/include/asm/vdso.h
12848 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
12849 #define VDSO32_SYMBOL(base, name) \
12850 ({ \
12851 extern const char VDSO32_##name[]; \
12852 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12853 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12854 })
12855 #endif
12856
12857 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
12858 index 1971e65..1e3559b 100644
12859 --- a/arch/x86/include/asm/x86_init.h
12860 +++ b/arch/x86/include/asm/x86_init.h
12861 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
12862 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
12863 void (*find_smp_config)(void);
12864 void (*get_smp_config)(unsigned int early);
12865 -};
12866 +} __no_const;
12867
12868 /**
12869 * struct x86_init_resources - platform specific resource related ops
12870 @@ -42,7 +42,7 @@ struct x86_init_resources {
12871 void (*probe_roms)(void);
12872 void (*reserve_resources)(void);
12873 char *(*memory_setup)(void);
12874 -};
12875 +} __no_const;
12876
12877 /**
12878 * struct x86_init_irqs - platform specific interrupt setup
12879 @@ -55,7 +55,7 @@ struct x86_init_irqs {
12880 void (*pre_vector_init)(void);
12881 void (*intr_init)(void);
12882 void (*trap_init)(void);
12883 -};
12884 +} __no_const;
12885
12886 /**
12887 * struct x86_init_oem - oem platform specific customizing functions
12888 @@ -65,7 +65,7 @@ struct x86_init_irqs {
12889 struct x86_init_oem {
12890 void (*arch_setup)(void);
12891 void (*banner)(void);
12892 -};
12893 +} __no_const;
12894
12895 /**
12896 * struct x86_init_mapping - platform specific initial kernel pagetable setup
12897 @@ -76,7 +76,7 @@ struct x86_init_oem {
12898 */
12899 struct x86_init_mapping {
12900 void (*pagetable_reserve)(u64 start, u64 end);
12901 -};
12902 +} __no_const;
12903
12904 /**
12905 * struct x86_init_paging - platform specific paging functions
12906 @@ -86,7 +86,7 @@ struct x86_init_mapping {
12907 struct x86_init_paging {
12908 void (*pagetable_setup_start)(pgd_t *base);
12909 void (*pagetable_setup_done)(pgd_t *base);
12910 -};
12911 +} __no_const;
12912
12913 /**
12914 * struct x86_init_timers - platform specific timer setup
12915 @@ -101,7 +101,7 @@ struct x86_init_timers {
12916 void (*tsc_pre_init)(void);
12917 void (*timer_init)(void);
12918 void (*wallclock_init)(void);
12919 -};
12920 +} __no_const;
12921
12922 /**
12923 * struct x86_init_iommu - platform specific iommu setup
12924 @@ -109,7 +109,7 @@ struct x86_init_timers {
12925 */
12926 struct x86_init_iommu {
12927 int (*iommu_init)(void);
12928 -};
12929 +} __no_const;
12930
12931 /**
12932 * struct x86_init_pci - platform specific pci init functions
12933 @@ -123,7 +123,7 @@ struct x86_init_pci {
12934 int (*init)(void);
12935 void (*init_irq)(void);
12936 void (*fixup_irqs)(void);
12937 -};
12938 +} __no_const;
12939
12940 /**
12941 * struct x86_init_ops - functions for platform specific setup
12942 @@ -139,7 +139,7 @@ struct x86_init_ops {
12943 struct x86_init_timers timers;
12944 struct x86_init_iommu iommu;
12945 struct x86_init_pci pci;
12946 -};
12947 +} __no_const;
12948
12949 /**
12950 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
12951 @@ -147,7 +147,7 @@ struct x86_init_ops {
12952 */
12953 struct x86_cpuinit_ops {
12954 void (*setup_percpu_clockev)(void);
12955 -};
12956 +} __no_const;
12957
12958 /**
12959 * struct x86_platform_ops - platform specific runtime functions
12960 @@ -169,7 +169,7 @@ struct x86_platform_ops {
12961 void (*nmi_init)(void);
12962 unsigned char (*get_nmi_reason)(void);
12963 int (*i8042_detect)(void);
12964 -};
12965 +} __no_const;
12966
12967 struct pci_dev;
12968
12969 @@ -177,7 +177,7 @@ struct x86_msi_ops {
12970 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
12971 void (*teardown_msi_irq)(unsigned int irq);
12972 void (*teardown_msi_irqs)(struct pci_dev *dev);
12973 -};
12974 +} __no_const;
12975
12976 extern struct x86_init_ops x86_init;
12977 extern struct x86_cpuinit_ops x86_cpuinit;
12978 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
12979 index c6ce245..ffbdab7 100644
12980 --- a/arch/x86/include/asm/xsave.h
12981 +++ b/arch/x86/include/asm/xsave.h
12982 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12983 {
12984 int err;
12985
12986 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12987 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
12988 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
12989 +#endif
12990 +
12991 /*
12992 * Clear the xsave header first, so that reserved fields are
12993 * initialized to zero.
12994 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
12995 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
12996 {
12997 int err;
12998 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
12999 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13000 u32 lmask = mask;
13001 u32 hmask = mask >> 32;
13002
13003 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13004 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13005 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13006 +#endif
13007 +
13008 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13009 "2:\n"
13010 ".section .fixup,\"ax\"\n"
13011 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13012 index 6a564ac..9b1340c 100644
13013 --- a/arch/x86/kernel/acpi/realmode/Makefile
13014 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13015 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13016 $(call cc-option, -fno-stack-protector) \
13017 $(call cc-option, -mpreferred-stack-boundary=2)
13018 KBUILD_CFLAGS += $(call cc-option, -m32)
13019 +ifdef CONSTIFY_PLUGIN
13020 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13021 +endif
13022 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13023 GCOV_PROFILE := n
13024
13025 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13026 index b4fd836..4358fe3 100644
13027 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13028 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13029 @@ -108,6 +108,9 @@ wakeup_code:
13030 /* Do any other stuff... */
13031
13032 #ifndef CONFIG_64BIT
13033 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13034 + call verify_cpu
13035 +
13036 /* This could also be done in C code... */
13037 movl pmode_cr3, %eax
13038 movl %eax, %cr3
13039 @@ -131,6 +134,7 @@ wakeup_code:
13040 movl pmode_cr0, %eax
13041 movl %eax, %cr0
13042 jmp pmode_return
13043 +# include "../../verify_cpu.S"
13044 #else
13045 pushw $0
13046 pushw trampoline_segment
13047 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13048 index 103b6ab..2004d0a 100644
13049 --- a/arch/x86/kernel/acpi/sleep.c
13050 +++ b/arch/x86/kernel/acpi/sleep.c
13051 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13052 header->trampoline_segment = trampoline_address() >> 4;
13053 #ifdef CONFIG_SMP
13054 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13055 +
13056 + pax_open_kernel();
13057 early_gdt_descr.address =
13058 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13059 + pax_close_kernel();
13060 +
13061 initial_gs = per_cpu_offset(smp_processor_id());
13062 #endif
13063 initial_code = (unsigned long)wakeup_long64;
13064 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13065 index 13ab720..95d5442 100644
13066 --- a/arch/x86/kernel/acpi/wakeup_32.S
13067 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13068 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13069 # and restore the stack ... but you need gdt for this to work
13070 movl saved_context_esp, %esp
13071
13072 - movl %cs:saved_magic, %eax
13073 - cmpl $0x12345678, %eax
13074 + cmpl $0x12345678, saved_magic
13075 jne bogus_magic
13076
13077 # jump to place where we left off
13078 - movl saved_eip, %eax
13079 - jmp *%eax
13080 + jmp *(saved_eip)
13081
13082 bogus_magic:
13083 jmp bogus_magic
13084 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13085 index 1f84794..e23f862 100644
13086 --- a/arch/x86/kernel/alternative.c
13087 +++ b/arch/x86/kernel/alternative.c
13088 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13089 */
13090 for (a = start; a < end; a++) {
13091 instr = (u8 *)&a->instr_offset + a->instr_offset;
13092 +
13093 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13094 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13095 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13096 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13097 +#endif
13098 +
13099 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13100 BUG_ON(a->replacementlen > a->instrlen);
13101 BUG_ON(a->instrlen > sizeof(insnbuf));
13102 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13103 for (poff = start; poff < end; poff++) {
13104 u8 *ptr = (u8 *)poff + *poff;
13105
13106 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13107 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13108 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13109 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13110 +#endif
13111 +
13112 if (!*poff || ptr < text || ptr >= text_end)
13113 continue;
13114 /* turn DS segment override prefix into lock prefix */
13115 - if (*ptr == 0x3e)
13116 + if (*ktla_ktva(ptr) == 0x3e)
13117 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13118 };
13119 mutex_unlock(&text_mutex);
13120 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13121 for (poff = start; poff < end; poff++) {
13122 u8 *ptr = (u8 *)poff + *poff;
13123
13124 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13125 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13126 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13127 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13128 +#endif
13129 +
13130 if (!*poff || ptr < text || ptr >= text_end)
13131 continue;
13132 /* turn lock prefix into DS segment override prefix */
13133 - if (*ptr == 0xf0)
13134 + if (*ktla_ktva(ptr) == 0xf0)
13135 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13136 };
13137 mutex_unlock(&text_mutex);
13138 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13139
13140 BUG_ON(p->len > MAX_PATCH_LEN);
13141 /* prep the buffer with the original instructions */
13142 - memcpy(insnbuf, p->instr, p->len);
13143 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13144 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13145 (unsigned long)p->instr, p->len);
13146
13147 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13148 if (smp_alt_once)
13149 free_init_pages("SMP alternatives",
13150 (unsigned long)__smp_locks,
13151 - (unsigned long)__smp_locks_end);
13152 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13153
13154 restart_nmi();
13155 }
13156 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13157 * instructions. And on the local CPU you need to be protected again NMI or MCE
13158 * handlers seeing an inconsistent instruction while you patch.
13159 */
13160 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13161 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13162 size_t len)
13163 {
13164 unsigned long flags;
13165 local_irq_save(flags);
13166 - memcpy(addr, opcode, len);
13167 +
13168 + pax_open_kernel();
13169 + memcpy(ktla_ktva(addr), opcode, len);
13170 sync_core();
13171 + pax_close_kernel();
13172 +
13173 local_irq_restore(flags);
13174 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13175 that causes hangs on some VIA CPUs. */
13176 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13177 */
13178 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13179 {
13180 - unsigned long flags;
13181 - char *vaddr;
13182 + unsigned char *vaddr = ktla_ktva(addr);
13183 struct page *pages[2];
13184 - int i;
13185 + size_t i;
13186
13187 if (!core_kernel_text((unsigned long)addr)) {
13188 - pages[0] = vmalloc_to_page(addr);
13189 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13190 + pages[0] = vmalloc_to_page(vaddr);
13191 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13192 } else {
13193 - pages[0] = virt_to_page(addr);
13194 + pages[0] = virt_to_page(vaddr);
13195 WARN_ON(!PageReserved(pages[0]));
13196 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13197 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13198 }
13199 BUG_ON(!pages[0]);
13200 - local_irq_save(flags);
13201 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13202 - if (pages[1])
13203 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13204 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13205 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13206 - clear_fixmap(FIX_TEXT_POKE0);
13207 - if (pages[1])
13208 - clear_fixmap(FIX_TEXT_POKE1);
13209 - local_flush_tlb();
13210 - sync_core();
13211 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13212 - that causes hangs on some VIA CPUs. */
13213 + text_poke_early(addr, opcode, len);
13214 for (i = 0; i < len; i++)
13215 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13216 - local_irq_restore(flags);
13217 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13218 return addr;
13219 }
13220
13221 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13222 index f98d84c..e402a69 100644
13223 --- a/arch/x86/kernel/apic/apic.c
13224 +++ b/arch/x86/kernel/apic/apic.c
13225 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
13226 /*
13227 * Debug level, exported for io_apic.c
13228 */
13229 -unsigned int apic_verbosity;
13230 +int apic_verbosity;
13231
13232 int pic_mode;
13233
13234 @@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13235 apic_write(APIC_ESR, 0);
13236 v1 = apic_read(APIC_ESR);
13237 ack_APIC_irq();
13238 - atomic_inc(&irq_err_count);
13239 + atomic_inc_unchecked(&irq_err_count);
13240
13241 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13242 smp_processor_id(), v0 , v1);
13243 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13244 index 6d939d7..0697fcc 100644
13245 --- a/arch/x86/kernel/apic/io_apic.c
13246 +++ b/arch/x86/kernel/apic/io_apic.c
13247 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13248 }
13249 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13250
13251 -void lock_vector_lock(void)
13252 +void lock_vector_lock(void) __acquires(vector_lock)
13253 {
13254 /* Used to the online set of cpus does not change
13255 * during assign_irq_vector.
13256 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
13257 raw_spin_lock(&vector_lock);
13258 }
13259
13260 -void unlock_vector_lock(void)
13261 +void unlock_vector_lock(void) __releases(vector_lock)
13262 {
13263 raw_spin_unlock(&vector_lock);
13264 }
13265 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
13266 ack_APIC_irq();
13267 }
13268
13269 -atomic_t irq_mis_count;
13270 +atomic_unchecked_t irq_mis_count;
13271
13272 static void ack_apic_level(struct irq_data *data)
13273 {
13274 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
13275 * at the cpu.
13276 */
13277 if (!(v & (1 << (i & 0x1f)))) {
13278 - atomic_inc(&irq_mis_count);
13279 + atomic_inc_unchecked(&irq_mis_count);
13280
13281 eoi_ioapic_irq(irq, cfg);
13282 }
13283 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13284 index a46bd38..6b906d7 100644
13285 --- a/arch/x86/kernel/apm_32.c
13286 +++ b/arch/x86/kernel/apm_32.c
13287 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
13288 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13289 * even though they are called in protected mode.
13290 */
13291 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13292 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13293 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13294
13295 static const char driver_version[] = "1.16ac"; /* no spaces */
13296 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
13297 BUG_ON(cpu != 0);
13298 gdt = get_cpu_gdt_table(cpu);
13299 save_desc_40 = gdt[0x40 / 8];
13300 +
13301 + pax_open_kernel();
13302 gdt[0x40 / 8] = bad_bios_desc;
13303 + pax_close_kernel();
13304
13305 apm_irq_save(flags);
13306 APM_DO_SAVE_SEGS;
13307 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
13308 &call->esi);
13309 APM_DO_RESTORE_SEGS;
13310 apm_irq_restore(flags);
13311 +
13312 + pax_open_kernel();
13313 gdt[0x40 / 8] = save_desc_40;
13314 + pax_close_kernel();
13315 +
13316 put_cpu();
13317
13318 return call->eax & 0xff;
13319 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
13320 BUG_ON(cpu != 0);
13321 gdt = get_cpu_gdt_table(cpu);
13322 save_desc_40 = gdt[0x40 / 8];
13323 +
13324 + pax_open_kernel();
13325 gdt[0x40 / 8] = bad_bios_desc;
13326 + pax_close_kernel();
13327
13328 apm_irq_save(flags);
13329 APM_DO_SAVE_SEGS;
13330 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
13331 &call->eax);
13332 APM_DO_RESTORE_SEGS;
13333 apm_irq_restore(flags);
13334 +
13335 + pax_open_kernel();
13336 gdt[0x40 / 8] = save_desc_40;
13337 + pax_close_kernel();
13338 +
13339 put_cpu();
13340 return error;
13341 }
13342 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
13343 * code to that CPU.
13344 */
13345 gdt = get_cpu_gdt_table(0);
13346 +
13347 + pax_open_kernel();
13348 set_desc_base(&gdt[APM_CS >> 3],
13349 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13350 set_desc_base(&gdt[APM_CS_16 >> 3],
13351 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13352 set_desc_base(&gdt[APM_DS >> 3],
13353 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13354 + pax_close_kernel();
13355
13356 proc_create("apm", 0, NULL, &apm_file_ops);
13357
13358 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13359 index 4f13faf..87db5d2 100644
13360 --- a/arch/x86/kernel/asm-offsets.c
13361 +++ b/arch/x86/kernel/asm-offsets.c
13362 @@ -33,6 +33,8 @@ void common(void) {
13363 OFFSET(TI_status, thread_info, status);
13364 OFFSET(TI_addr_limit, thread_info, addr_limit);
13365 OFFSET(TI_preempt_count, thread_info, preempt_count);
13366 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13367 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13368
13369 BLANK();
13370 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13371 @@ -53,8 +55,26 @@ void common(void) {
13372 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13373 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13374 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13375 +
13376 +#ifdef CONFIG_PAX_KERNEXEC
13377 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13378 #endif
13379
13380 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13381 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13382 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13383 +#ifdef CONFIG_X86_64
13384 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13385 +#endif
13386 +#endif
13387 +
13388 +#endif
13389 +
13390 + BLANK();
13391 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13392 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13393 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13394 +
13395 #ifdef CONFIG_XEN
13396 BLANK();
13397 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13398 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13399 index e72a119..6e2955d 100644
13400 --- a/arch/x86/kernel/asm-offsets_64.c
13401 +++ b/arch/x86/kernel/asm-offsets_64.c
13402 @@ -69,6 +69,7 @@ int main(void)
13403 BLANK();
13404 #undef ENTRY
13405
13406 + DEFINE(TSS_size, sizeof(struct tss_struct));
13407 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13408 BLANK();
13409
13410 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13411 index 25f24dc..4094a7f 100644
13412 --- a/arch/x86/kernel/cpu/Makefile
13413 +++ b/arch/x86/kernel/cpu/Makefile
13414 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
13415 CFLAGS_REMOVE_perf_event.o = -pg
13416 endif
13417
13418 -# Make sure load_percpu_segment has no stackprotector
13419 -nostackp := $(call cc-option, -fno-stack-protector)
13420 -CFLAGS_common.o := $(nostackp)
13421 -
13422 obj-y := intel_cacheinfo.o scattered.o topology.o
13423 obj-y += proc.o capflags.o powerflags.o common.o
13424 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
13425 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13426 index 0bab2b1..d0a1bf8 100644
13427 --- a/arch/x86/kernel/cpu/amd.c
13428 +++ b/arch/x86/kernel/cpu/amd.c
13429 @@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13430 unsigned int size)
13431 {
13432 /* AMD errata T13 (order #21922) */
13433 - if ((c->x86 == 6)) {
13434 + if (c->x86 == 6) {
13435 /* Duron Rev A0 */
13436 if (c->x86_model == 3 && c->x86_mask == 0)
13437 size = 64;
13438 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13439 index aa003b1..47ea638 100644
13440 --- a/arch/x86/kernel/cpu/common.c
13441 +++ b/arch/x86/kernel/cpu/common.c
13442 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13443
13444 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13445
13446 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13447 -#ifdef CONFIG_X86_64
13448 - /*
13449 - * We need valid kernel segments for data and code in long mode too
13450 - * IRET will check the segment types kkeil 2000/10/28
13451 - * Also sysret mandates a special GDT layout
13452 - *
13453 - * TLS descriptors are currently at a different place compared to i386.
13454 - * Hopefully nobody expects them at a fixed place (Wine?)
13455 - */
13456 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13457 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13458 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13459 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13460 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13461 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13462 -#else
13463 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13464 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13465 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13466 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13467 - /*
13468 - * Segments used for calling PnP BIOS have byte granularity.
13469 - * They code segments and data segments have fixed 64k limits,
13470 - * the transfer segment sizes are set at run time.
13471 - */
13472 - /* 32-bit code */
13473 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13474 - /* 16-bit code */
13475 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13476 - /* 16-bit data */
13477 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13478 - /* 16-bit data */
13479 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13480 - /* 16-bit data */
13481 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13482 - /*
13483 - * The APM segments have byte granularity and their bases
13484 - * are set at run time. All have 64k limits.
13485 - */
13486 - /* 32-bit code */
13487 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13488 - /* 16-bit code */
13489 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13490 - /* data */
13491 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13492 -
13493 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13494 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13495 - GDT_STACK_CANARY_INIT
13496 -#endif
13497 -} };
13498 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13499 -
13500 static int __init x86_xsave_setup(char *s)
13501 {
13502 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13503 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
13504 {
13505 struct desc_ptr gdt_descr;
13506
13507 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13508 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13509 gdt_descr.size = GDT_SIZE - 1;
13510 load_gdt(&gdt_descr);
13511 /* Reload the per-cpu base */
13512 @@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13513 /* Filter out anything that depends on CPUID levels we don't have */
13514 filter_cpuid_features(c, true);
13515
13516 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13517 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13518 +#endif
13519 +
13520 /* If the model name is still unset, do table lookup. */
13521 if (!c->x86_model_id[0]) {
13522 const char *p;
13523 @@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
13524 }
13525 __setup("clearcpuid=", setup_disablecpuid);
13526
13527 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13528 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13529 +
13530 #ifdef CONFIG_X86_64
13531 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13532
13533 @@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13534 EXPORT_PER_CPU_SYMBOL(current_task);
13535
13536 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13537 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13538 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13539 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13540
13541 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13542 @@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13543 {
13544 memset(regs, 0, sizeof(struct pt_regs));
13545 regs->fs = __KERNEL_PERCPU;
13546 - regs->gs = __KERNEL_STACK_CANARY;
13547 + savesegment(gs, regs->gs);
13548
13549 return regs;
13550 }
13551 @@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
13552 int i;
13553
13554 cpu = stack_smp_processor_id();
13555 - t = &per_cpu(init_tss, cpu);
13556 + t = init_tss + cpu;
13557 oist = &per_cpu(orig_ist, cpu);
13558
13559 #ifdef CONFIG_NUMA
13560 @@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
13561 switch_to_new_gdt(cpu);
13562 loadsegment(fs, 0);
13563
13564 - load_idt((const struct desc_ptr *)&idt_descr);
13565 + load_idt(&idt_descr);
13566
13567 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13568 syscall_init();
13569 @@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
13570 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13571 barrier();
13572
13573 - x86_configure_nx();
13574 if (cpu != 0)
13575 enable_x2apic();
13576
13577 @@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
13578 {
13579 int cpu = smp_processor_id();
13580 struct task_struct *curr = current;
13581 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13582 + struct tss_struct *t = init_tss + cpu;
13583 struct thread_struct *thread = &curr->thread;
13584
13585 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13586 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13587 index 5231312..a78a987 100644
13588 --- a/arch/x86/kernel/cpu/intel.c
13589 +++ b/arch/x86/kernel/cpu/intel.c
13590 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13591 * Update the IDT descriptor and reload the IDT so that
13592 * it uses the read-only mapped virtual address.
13593 */
13594 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13595 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13596 load_idt(&idt_descr);
13597 }
13598 #endif
13599 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13600 index 2af127d..8ff7ac0 100644
13601 --- a/arch/x86/kernel/cpu/mcheck/mce.c
13602 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
13603 @@ -42,6 +42,7 @@
13604 #include <asm/processor.h>
13605 #include <asm/mce.h>
13606 #include <asm/msr.h>
13607 +#include <asm/local.h>
13608
13609 #include "mce-internal.h"
13610
13611 @@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
13612 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13613 m->cs, m->ip);
13614
13615 - if (m->cs == __KERNEL_CS)
13616 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13617 print_symbol("{%s}", m->ip);
13618 pr_cont("\n");
13619 }
13620 @@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
13621
13622 #define PANIC_TIMEOUT 5 /* 5 seconds */
13623
13624 -static atomic_t mce_paniced;
13625 +static atomic_unchecked_t mce_paniced;
13626
13627 static int fake_panic;
13628 -static atomic_t mce_fake_paniced;
13629 +static atomic_unchecked_t mce_fake_paniced;
13630
13631 /* Panic in progress. Enable interrupts and wait for final IPI */
13632 static void wait_for_panic(void)
13633 @@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13634 /*
13635 * Make sure only one CPU runs in machine check panic
13636 */
13637 - if (atomic_inc_return(&mce_paniced) > 1)
13638 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13639 wait_for_panic();
13640 barrier();
13641
13642 @@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13643 console_verbose();
13644 } else {
13645 /* Don't log too much for fake panic */
13646 - if (atomic_inc_return(&mce_fake_paniced) > 1)
13647 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13648 return;
13649 }
13650 /* First print corrected ones that are still unlogged */
13651 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
13652 * might have been modified by someone else.
13653 */
13654 rmb();
13655 - if (atomic_read(&mce_paniced))
13656 + if (atomic_read_unchecked(&mce_paniced))
13657 wait_for_panic();
13658 if (!monarch_timeout)
13659 goto out;
13660 @@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13661 }
13662
13663 /* Call the installed machine check handler for this CPU setup. */
13664 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
13665 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13666 unexpected_machine_check;
13667
13668 /*
13669 @@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13670 return;
13671 }
13672
13673 + pax_open_kernel();
13674 machine_check_vector = do_machine_check;
13675 + pax_close_kernel();
13676
13677 __mcheck_cpu_init_generic();
13678 __mcheck_cpu_init_vendor(c);
13679 @@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13680 */
13681
13682 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
13683 -static int mce_chrdev_open_count; /* #times opened */
13684 +static local_t mce_chrdev_open_count; /* #times opened */
13685 static int mce_chrdev_open_exclu; /* already open exclusive? */
13686
13687 static int mce_chrdev_open(struct inode *inode, struct file *file)
13688 @@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13689 spin_lock(&mce_chrdev_state_lock);
13690
13691 if (mce_chrdev_open_exclu ||
13692 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
13693 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
13694 spin_unlock(&mce_chrdev_state_lock);
13695
13696 return -EBUSY;
13697 @@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13698
13699 if (file->f_flags & O_EXCL)
13700 mce_chrdev_open_exclu = 1;
13701 - mce_chrdev_open_count++;
13702 + local_inc(&mce_chrdev_open_count);
13703
13704 spin_unlock(&mce_chrdev_state_lock);
13705
13706 @@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
13707 {
13708 spin_lock(&mce_chrdev_state_lock);
13709
13710 - mce_chrdev_open_count--;
13711 + local_dec(&mce_chrdev_open_count);
13712 mce_chrdev_open_exclu = 0;
13713
13714 spin_unlock(&mce_chrdev_state_lock);
13715 @@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
13716 static void mce_reset(void)
13717 {
13718 cpu_missing = 0;
13719 - atomic_set(&mce_fake_paniced, 0);
13720 + atomic_set_unchecked(&mce_fake_paniced, 0);
13721 atomic_set(&mce_executing, 0);
13722 atomic_set(&mce_callin, 0);
13723 atomic_set(&global_nwo, 0);
13724 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13725 index 5c0e653..0882b0a 100644
13726 --- a/arch/x86/kernel/cpu/mcheck/p5.c
13727 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
13728 @@ -12,6 +12,7 @@
13729 #include <asm/system.h>
13730 #include <asm/mce.h>
13731 #include <asm/msr.h>
13732 +#include <asm/pgtable.h>
13733
13734 /* By default disabled */
13735 int mce_p5_enabled __read_mostly;
13736 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13737 if (!cpu_has(c, X86_FEATURE_MCE))
13738 return;
13739
13740 + pax_open_kernel();
13741 machine_check_vector = pentium_machine_check;
13742 + pax_close_kernel();
13743 /* Make sure the vector pointer is visible before we enable MCEs: */
13744 wmb();
13745
13746 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13747 index 54060f5..c1a7577 100644
13748 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
13749 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13750 @@ -11,6 +11,7 @@
13751 #include <asm/system.h>
13752 #include <asm/mce.h>
13753 #include <asm/msr.h>
13754 +#include <asm/pgtable.h>
13755
13756 /* Machine check handler for WinChip C6: */
13757 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13758 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13759 {
13760 u32 lo, hi;
13761
13762 + pax_open_kernel();
13763 machine_check_vector = winchip_machine_check;
13764 + pax_close_kernel();
13765 /* Make sure the vector pointer is visible before we enable MCEs: */
13766 wmb();
13767
13768 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13769 index 6b96110..0da73eb 100644
13770 --- a/arch/x86/kernel/cpu/mtrr/main.c
13771 +++ b/arch/x86/kernel/cpu/mtrr/main.c
13772 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
13773 u64 size_or_mask, size_and_mask;
13774 static bool mtrr_aps_delayed_init;
13775
13776 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13777 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13778
13779 const struct mtrr_ops *mtrr_if;
13780
13781 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
13782 index df5e41f..816c719 100644
13783 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
13784 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
13785 @@ -25,7 +25,7 @@ struct mtrr_ops {
13786 int (*validate_add_page)(unsigned long base, unsigned long size,
13787 unsigned int type);
13788 int (*have_wrcomb)(void);
13789 -};
13790 +} __do_const;
13791
13792 extern int generic_get_free_region(unsigned long base, unsigned long size,
13793 int replace_reg);
13794 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
13795 index 2bda212..78cc605 100644
13796 --- a/arch/x86/kernel/cpu/perf_event.c
13797 +++ b/arch/x86/kernel/cpu/perf_event.c
13798 @@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
13799 break;
13800
13801 perf_callchain_store(entry, frame.return_address);
13802 - fp = frame.next_frame;
13803 + fp = (const void __force_user *)frame.next_frame;
13804 }
13805 }
13806
13807 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
13808 index 13ad899..f642b9a 100644
13809 --- a/arch/x86/kernel/crash.c
13810 +++ b/arch/x86/kernel/crash.c
13811 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
13812 {
13813 #ifdef CONFIG_X86_32
13814 struct pt_regs fixed_regs;
13815 -#endif
13816
13817 -#ifdef CONFIG_X86_32
13818 - if (!user_mode_vm(regs)) {
13819 + if (!user_mode(regs)) {
13820 crash_fixup_ss_esp(&fixed_regs, regs);
13821 regs = &fixed_regs;
13822 }
13823 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
13824 index 37250fe..bf2ec74 100644
13825 --- a/arch/x86/kernel/doublefault_32.c
13826 +++ b/arch/x86/kernel/doublefault_32.c
13827 @@ -11,7 +11,7 @@
13828
13829 #define DOUBLEFAULT_STACKSIZE (1024)
13830 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
13831 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
13832 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
13833
13834 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
13835
13836 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
13837 unsigned long gdt, tss;
13838
13839 store_gdt(&gdt_desc);
13840 - gdt = gdt_desc.address;
13841 + gdt = (unsigned long)gdt_desc.address;
13842
13843 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
13844
13845 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
13846 /* 0x2 bit is always set */
13847 .flags = X86_EFLAGS_SF | 0x2,
13848 .sp = STACK_START,
13849 - .es = __USER_DS,
13850 + .es = __KERNEL_DS,
13851 .cs = __KERNEL_CS,
13852 .ss = __KERNEL_DS,
13853 - .ds = __USER_DS,
13854 + .ds = __KERNEL_DS,
13855 .fs = __KERNEL_PERCPU,
13856
13857 .__cr3 = __pa_nodebug(swapper_pg_dir),
13858 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
13859 index 1aae78f..aab3a3d 100644
13860 --- a/arch/x86/kernel/dumpstack.c
13861 +++ b/arch/x86/kernel/dumpstack.c
13862 @@ -2,6 +2,9 @@
13863 * Copyright (C) 1991, 1992 Linus Torvalds
13864 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
13865 */
13866 +#ifdef CONFIG_GRKERNSEC_HIDESYM
13867 +#define __INCLUDED_BY_HIDESYM 1
13868 +#endif
13869 #include <linux/kallsyms.h>
13870 #include <linux/kprobes.h>
13871 #include <linux/uaccess.h>
13872 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
13873 static void
13874 print_ftrace_graph_addr(unsigned long addr, void *data,
13875 const struct stacktrace_ops *ops,
13876 - struct thread_info *tinfo, int *graph)
13877 + struct task_struct *task, int *graph)
13878 {
13879 - struct task_struct *task = tinfo->task;
13880 unsigned long ret_addr;
13881 int index = task->curr_ret_stack;
13882
13883 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13884 static inline void
13885 print_ftrace_graph_addr(unsigned long addr, void *data,
13886 const struct stacktrace_ops *ops,
13887 - struct thread_info *tinfo, int *graph)
13888 + struct task_struct *task, int *graph)
13889 { }
13890 #endif
13891
13892 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
13893 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
13894 */
13895
13896 -static inline int valid_stack_ptr(struct thread_info *tinfo,
13897 - void *p, unsigned int size, void *end)
13898 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
13899 {
13900 - void *t = tinfo;
13901 if (end) {
13902 if (p < end && p >= (end-THREAD_SIZE))
13903 return 1;
13904 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
13905 }
13906
13907 unsigned long
13908 -print_context_stack(struct thread_info *tinfo,
13909 +print_context_stack(struct task_struct *task, void *stack_start,
13910 unsigned long *stack, unsigned long bp,
13911 const struct stacktrace_ops *ops, void *data,
13912 unsigned long *end, int *graph)
13913 {
13914 struct stack_frame *frame = (struct stack_frame *)bp;
13915
13916 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
13917 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
13918 unsigned long addr;
13919
13920 addr = *stack;
13921 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
13922 } else {
13923 ops->address(data, addr, 0);
13924 }
13925 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13926 + print_ftrace_graph_addr(addr, data, ops, task, graph);
13927 }
13928 stack++;
13929 }
13930 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
13931 EXPORT_SYMBOL_GPL(print_context_stack);
13932
13933 unsigned long
13934 -print_context_stack_bp(struct thread_info *tinfo,
13935 +print_context_stack_bp(struct task_struct *task, void *stack_start,
13936 unsigned long *stack, unsigned long bp,
13937 const struct stacktrace_ops *ops, void *data,
13938 unsigned long *end, int *graph)
13939 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13940 struct stack_frame *frame = (struct stack_frame *)bp;
13941 unsigned long *ret_addr = &frame->return_address;
13942
13943 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
13944 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
13945 unsigned long addr = *ret_addr;
13946
13947 if (!__kernel_text_address(addr))
13948 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
13949 ops->address(data, addr, 1);
13950 frame = frame->next_frame;
13951 ret_addr = &frame->return_address;
13952 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
13953 + print_ftrace_graph_addr(addr, data, ops, task, graph);
13954 }
13955
13956 return (unsigned long)frame;
13957 @@ -186,7 +186,7 @@ void dump_stack(void)
13958
13959 bp = stack_frame(current, NULL);
13960 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
13961 - current->pid, current->comm, print_tainted(),
13962 + task_pid_nr(current), current->comm, print_tainted(),
13963 init_utsname()->release,
13964 (int)strcspn(init_utsname()->version, " "),
13965 init_utsname()->version);
13966 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
13967 }
13968 EXPORT_SYMBOL_GPL(oops_begin);
13969
13970 +extern void gr_handle_kernel_exploit(void);
13971 +
13972 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13973 {
13974 if (regs && kexec_should_crash(current))
13975 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
13976 panic("Fatal exception in interrupt");
13977 if (panic_on_oops)
13978 panic("Fatal exception");
13979 - do_exit(signr);
13980 +
13981 + gr_handle_kernel_exploit();
13982 +
13983 + do_group_exit(signr);
13984 }
13985
13986 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13987 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
13988
13989 show_registers(regs);
13990 #ifdef CONFIG_X86_32
13991 - if (user_mode_vm(regs)) {
13992 + if (user_mode(regs)) {
13993 sp = regs->sp;
13994 ss = regs->ss & 0xffff;
13995 } else {
13996 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
13997 unsigned long flags = oops_begin();
13998 int sig = SIGSEGV;
13999
14000 - if (!user_mode_vm(regs))
14001 + if (!user_mode(regs))
14002 report_bug(regs->ip, regs);
14003
14004 if (__die(str, regs, err))
14005 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14006 index c99f9ed..2a15d80 100644
14007 --- a/arch/x86/kernel/dumpstack_32.c
14008 +++ b/arch/x86/kernel/dumpstack_32.c
14009 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14010 bp = stack_frame(task, regs);
14011
14012 for (;;) {
14013 - struct thread_info *context;
14014 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14015
14016 - context = (struct thread_info *)
14017 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14018 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14019 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14020
14021 - stack = (unsigned long *)context->previous_esp;
14022 - if (!stack)
14023 + if (stack_start == task_stack_page(task))
14024 break;
14025 + stack = *(unsigned long **)stack_start;
14026 if (ops->stack(data, "IRQ") < 0)
14027 break;
14028 touch_nmi_watchdog();
14029 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14030 * When in-kernel, we also print out the stack and code at the
14031 * time of the fault..
14032 */
14033 - if (!user_mode_vm(regs)) {
14034 + if (!user_mode(regs)) {
14035 unsigned int code_prologue = code_bytes * 43 / 64;
14036 unsigned int code_len = code_bytes;
14037 unsigned char c;
14038 u8 *ip;
14039 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14040
14041 printk(KERN_EMERG "Stack:\n");
14042 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14043
14044 printk(KERN_EMERG "Code: ");
14045
14046 - ip = (u8 *)regs->ip - code_prologue;
14047 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14048 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14049 /* try starting at IP */
14050 - ip = (u8 *)regs->ip;
14051 + ip = (u8 *)regs->ip + cs_base;
14052 code_len = code_len - code_prologue + 1;
14053 }
14054 for (i = 0; i < code_len; i++, ip++) {
14055 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14056 printk(KERN_CONT " Bad EIP value.");
14057 break;
14058 }
14059 - if (ip == (u8 *)regs->ip)
14060 + if (ip == (u8 *)regs->ip + cs_base)
14061 printk(KERN_CONT "<%02x> ", c);
14062 else
14063 printk(KERN_CONT "%02x ", c);
14064 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14065 {
14066 unsigned short ud2;
14067
14068 + ip = ktla_ktva(ip);
14069 if (ip < PAGE_OFFSET)
14070 return 0;
14071 if (probe_kernel_address((unsigned short *)ip, ud2))
14072 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14073
14074 return ud2 == 0x0b0f;
14075 }
14076 +
14077 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14078 +void pax_check_alloca(unsigned long size)
14079 +{
14080 + unsigned long sp = (unsigned long)&sp, stack_left;
14081 +
14082 + /* all kernel stacks are of the same size */
14083 + stack_left = sp & (THREAD_SIZE - 1);
14084 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14085 +}
14086 +EXPORT_SYMBOL(pax_check_alloca);
14087 +#endif
14088 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14089 index 6d728d9..279514e 100644
14090 --- a/arch/x86/kernel/dumpstack_64.c
14091 +++ b/arch/x86/kernel/dumpstack_64.c
14092 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14093 unsigned long *irq_stack_end =
14094 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14095 unsigned used = 0;
14096 - struct thread_info *tinfo;
14097 int graph = 0;
14098 unsigned long dummy;
14099 + void *stack_start;
14100
14101 if (!task)
14102 task = current;
14103 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14104 * current stack address. If the stacks consist of nested
14105 * exceptions
14106 */
14107 - tinfo = task_thread_info(task);
14108 for (;;) {
14109 char *id;
14110 unsigned long *estack_end;
14111 +
14112 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14113 &used, &id);
14114
14115 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14116 if (ops->stack(data, id) < 0)
14117 break;
14118
14119 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14120 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14121 data, estack_end, &graph);
14122 ops->stack(data, "<EOE>");
14123 /*
14124 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14125 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14126 if (ops->stack(data, "IRQ") < 0)
14127 break;
14128 - bp = ops->walk_stack(tinfo, stack, bp,
14129 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14130 ops, data, irq_stack_end, &graph);
14131 /*
14132 * We link to the next stack (which would be
14133 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14134 /*
14135 * This handles the process stack:
14136 */
14137 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14138 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14139 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14140 put_cpu();
14141 }
14142 EXPORT_SYMBOL(dump_trace);
14143 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14144
14145 return ud2 == 0x0b0f;
14146 }
14147 +
14148 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14149 +void pax_check_alloca(unsigned long size)
14150 +{
14151 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14152 + unsigned cpu, used;
14153 + char *id;
14154 +
14155 + /* check the process stack first */
14156 + stack_start = (unsigned long)task_stack_page(current);
14157 + stack_end = stack_start + THREAD_SIZE;
14158 + if (likely(stack_start <= sp && sp < stack_end)) {
14159 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14160 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14161 + return;
14162 + }
14163 +
14164 + cpu = get_cpu();
14165 +
14166 + /* check the irq stacks */
14167 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14168 + stack_start = stack_end - IRQ_STACK_SIZE;
14169 + if (stack_start <= sp && sp < stack_end) {
14170 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14171 + put_cpu();
14172 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14173 + return;
14174 + }
14175 +
14176 + /* check the exception stacks */
14177 + used = 0;
14178 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14179 + stack_start = stack_end - EXCEPTION_STKSZ;
14180 + if (stack_end && stack_start <= sp && sp < stack_end) {
14181 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14182 + put_cpu();
14183 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14184 + return;
14185 + }
14186 +
14187 + put_cpu();
14188 +
14189 + /* unknown stack */
14190 + BUG();
14191 +}
14192 +EXPORT_SYMBOL(pax_check_alloca);
14193 +#endif
14194 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14195 index cd28a35..c72ed9a 100644
14196 --- a/arch/x86/kernel/early_printk.c
14197 +++ b/arch/x86/kernel/early_printk.c
14198 @@ -7,6 +7,7 @@
14199 #include <linux/pci_regs.h>
14200 #include <linux/pci_ids.h>
14201 #include <linux/errno.h>
14202 +#include <linux/sched.h>
14203 #include <asm/io.h>
14204 #include <asm/processor.h>
14205 #include <asm/fcntl.h>
14206 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14207 index f3f6f53..0841b66 100644
14208 --- a/arch/x86/kernel/entry_32.S
14209 +++ b/arch/x86/kernel/entry_32.S
14210 @@ -186,13 +186,146 @@
14211 /*CFI_REL_OFFSET gs, PT_GS*/
14212 .endm
14213 .macro SET_KERNEL_GS reg
14214 +
14215 +#ifdef CONFIG_CC_STACKPROTECTOR
14216 movl $(__KERNEL_STACK_CANARY), \reg
14217 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14218 + movl $(__USER_DS), \reg
14219 +#else
14220 + xorl \reg, \reg
14221 +#endif
14222 +
14223 movl \reg, %gs
14224 .endm
14225
14226 #endif /* CONFIG_X86_32_LAZY_GS */
14227
14228 -.macro SAVE_ALL
14229 +.macro pax_enter_kernel
14230 +#ifdef CONFIG_PAX_KERNEXEC
14231 + call pax_enter_kernel
14232 +#endif
14233 +.endm
14234 +
14235 +.macro pax_exit_kernel
14236 +#ifdef CONFIG_PAX_KERNEXEC
14237 + call pax_exit_kernel
14238 +#endif
14239 +.endm
14240 +
14241 +#ifdef CONFIG_PAX_KERNEXEC
14242 +ENTRY(pax_enter_kernel)
14243 +#ifdef CONFIG_PARAVIRT
14244 + pushl %eax
14245 + pushl %ecx
14246 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14247 + mov %eax, %esi
14248 +#else
14249 + mov %cr0, %esi
14250 +#endif
14251 + bts $16, %esi
14252 + jnc 1f
14253 + mov %cs, %esi
14254 + cmp $__KERNEL_CS, %esi
14255 + jz 3f
14256 + ljmp $__KERNEL_CS, $3f
14257 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14258 +2:
14259 +#ifdef CONFIG_PARAVIRT
14260 + mov %esi, %eax
14261 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14262 +#else
14263 + mov %esi, %cr0
14264 +#endif
14265 +3:
14266 +#ifdef CONFIG_PARAVIRT
14267 + popl %ecx
14268 + popl %eax
14269 +#endif
14270 + ret
14271 +ENDPROC(pax_enter_kernel)
14272 +
14273 +ENTRY(pax_exit_kernel)
14274 +#ifdef CONFIG_PARAVIRT
14275 + pushl %eax
14276 + pushl %ecx
14277 +#endif
14278 + mov %cs, %esi
14279 + cmp $__KERNEXEC_KERNEL_CS, %esi
14280 + jnz 2f
14281 +#ifdef CONFIG_PARAVIRT
14282 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14283 + mov %eax, %esi
14284 +#else
14285 + mov %cr0, %esi
14286 +#endif
14287 + btr $16, %esi
14288 + ljmp $__KERNEL_CS, $1f
14289 +1:
14290 +#ifdef CONFIG_PARAVIRT
14291 + mov %esi, %eax
14292 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14293 +#else
14294 + mov %esi, %cr0
14295 +#endif
14296 +2:
14297 +#ifdef CONFIG_PARAVIRT
14298 + popl %ecx
14299 + popl %eax
14300 +#endif
14301 + ret
14302 +ENDPROC(pax_exit_kernel)
14303 +#endif
14304 +
14305 +.macro pax_erase_kstack
14306 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14307 + call pax_erase_kstack
14308 +#endif
14309 +.endm
14310 +
14311 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14312 +/*
14313 + * ebp: thread_info
14314 + * ecx, edx: can be clobbered
14315 + */
14316 +ENTRY(pax_erase_kstack)
14317 + pushl %edi
14318 + pushl %eax
14319 +
14320 + mov TI_lowest_stack(%ebp), %edi
14321 + mov $-0xBEEF, %eax
14322 + std
14323 +
14324 +1: mov %edi, %ecx
14325 + and $THREAD_SIZE_asm - 1, %ecx
14326 + shr $2, %ecx
14327 + repne scasl
14328 + jecxz 2f
14329 +
14330 + cmp $2*16, %ecx
14331 + jc 2f
14332 +
14333 + mov $2*16, %ecx
14334 + repe scasl
14335 + jecxz 2f
14336 + jne 1b
14337 +
14338 +2: cld
14339 + mov %esp, %ecx
14340 + sub %edi, %ecx
14341 + shr $2, %ecx
14342 + rep stosl
14343 +
14344 + mov TI_task_thread_sp0(%ebp), %edi
14345 + sub $128, %edi
14346 + mov %edi, TI_lowest_stack(%ebp)
14347 +
14348 + popl %eax
14349 + popl %edi
14350 + ret
14351 +ENDPROC(pax_erase_kstack)
14352 +#endif
14353 +
14354 +.macro __SAVE_ALL _DS
14355 cld
14356 PUSH_GS
14357 pushl_cfi %fs
14358 @@ -215,7 +348,7 @@
14359 CFI_REL_OFFSET ecx, 0
14360 pushl_cfi %ebx
14361 CFI_REL_OFFSET ebx, 0
14362 - movl $(__USER_DS), %edx
14363 + movl $\_DS, %edx
14364 movl %edx, %ds
14365 movl %edx, %es
14366 movl $(__KERNEL_PERCPU), %edx
14367 @@ -223,6 +356,15 @@
14368 SET_KERNEL_GS %edx
14369 .endm
14370
14371 +.macro SAVE_ALL
14372 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14373 + __SAVE_ALL __KERNEL_DS
14374 + pax_enter_kernel
14375 +#else
14376 + __SAVE_ALL __USER_DS
14377 +#endif
14378 +.endm
14379 +
14380 .macro RESTORE_INT_REGS
14381 popl_cfi %ebx
14382 CFI_RESTORE ebx
14383 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
14384 popfl_cfi
14385 jmp syscall_exit
14386 CFI_ENDPROC
14387 -END(ret_from_fork)
14388 +ENDPROC(ret_from_fork)
14389
14390 /*
14391 * Interrupt exit functions should be protected against kprobes
14392 @@ -333,7 +475,15 @@ check_userspace:
14393 movb PT_CS(%esp), %al
14394 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
14395 cmpl $USER_RPL, %eax
14396 +
14397 +#ifdef CONFIG_PAX_KERNEXEC
14398 + jae resume_userspace
14399 +
14400 + PAX_EXIT_KERNEL
14401 + jmp resume_kernel
14402 +#else
14403 jb resume_kernel # not returning to v8086 or userspace
14404 +#endif
14405
14406 ENTRY(resume_userspace)
14407 LOCKDEP_SYS_EXIT
14408 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
14409 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
14410 # int/exception return?
14411 jne work_pending
14412 - jmp restore_all
14413 -END(ret_from_exception)
14414 + jmp restore_all_pax
14415 +ENDPROC(ret_from_exception)
14416
14417 #ifdef CONFIG_PREEMPT
14418 ENTRY(resume_kernel)
14419 @@ -361,7 +511,7 @@ need_resched:
14420 jz restore_all
14421 call preempt_schedule_irq
14422 jmp need_resched
14423 -END(resume_kernel)
14424 +ENDPROC(resume_kernel)
14425 #endif
14426 CFI_ENDPROC
14427 /*
14428 @@ -395,23 +545,34 @@ sysenter_past_esp:
14429 /*CFI_REL_OFFSET cs, 0*/
14430 /*
14431 * Push current_thread_info()->sysenter_return to the stack.
14432 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
14433 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
14434 */
14435 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
14436 + pushl_cfi $0
14437 CFI_REL_OFFSET eip, 0
14438
14439 pushl_cfi %eax
14440 SAVE_ALL
14441 + GET_THREAD_INFO(%ebp)
14442 + movl TI_sysenter_return(%ebp),%ebp
14443 + movl %ebp,PT_EIP(%esp)
14444 ENABLE_INTERRUPTS(CLBR_NONE)
14445
14446 /*
14447 * Load the potential sixth argument from user stack.
14448 * Careful about security.
14449 */
14450 + movl PT_OLDESP(%esp),%ebp
14451 +
14452 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14453 + mov PT_OLDSS(%esp),%ds
14454 +1: movl %ds:(%ebp),%ebp
14455 + push %ss
14456 + pop %ds
14457 +#else
14458 cmpl $__PAGE_OFFSET-3,%ebp
14459 jae syscall_fault
14460 1: movl (%ebp),%ebp
14461 +#endif
14462 +
14463 movl %ebp,PT_EBP(%esp)
14464 .section __ex_table,"a"
14465 .align 4
14466 @@ -434,12 +595,24 @@ sysenter_do_call:
14467 testl $_TIF_ALLWORK_MASK, %ecx
14468 jne sysexit_audit
14469 sysenter_exit:
14470 +
14471 +#ifdef CONFIG_PAX_RANDKSTACK
14472 + pushl_cfi %eax
14473 + movl %esp, %eax
14474 + call pax_randomize_kstack
14475 + popl_cfi %eax
14476 +#endif
14477 +
14478 + pax_erase_kstack
14479 +
14480 /* if something modifies registers it must also disable sysexit */
14481 movl PT_EIP(%esp), %edx
14482 movl PT_OLDESP(%esp), %ecx
14483 xorl %ebp,%ebp
14484 TRACE_IRQS_ON
14485 1: mov PT_FS(%esp), %fs
14486 +2: mov PT_DS(%esp), %ds
14487 +3: mov PT_ES(%esp), %es
14488 PTGS_TO_GS
14489 ENABLE_INTERRUPTS_SYSEXIT
14490
14491 @@ -456,6 +629,9 @@ sysenter_audit:
14492 movl %eax,%edx /* 2nd arg: syscall number */
14493 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
14494 call audit_syscall_entry
14495 +
14496 + pax_erase_kstack
14497 +
14498 pushl_cfi %ebx
14499 movl PT_EAX(%esp),%eax /* reload syscall number */
14500 jmp sysenter_do_call
14501 @@ -482,11 +658,17 @@ sysexit_audit:
14502
14503 CFI_ENDPROC
14504 .pushsection .fixup,"ax"
14505 -2: movl $0,PT_FS(%esp)
14506 +4: movl $0,PT_FS(%esp)
14507 + jmp 1b
14508 +5: movl $0,PT_DS(%esp)
14509 + jmp 1b
14510 +6: movl $0,PT_ES(%esp)
14511 jmp 1b
14512 .section __ex_table,"a"
14513 .align 4
14514 - .long 1b,2b
14515 + .long 1b,4b
14516 + .long 2b,5b
14517 + .long 3b,6b
14518 .popsection
14519 PTGS_TO_GS_EX
14520 ENDPROC(ia32_sysenter_target)
14521 @@ -519,6 +701,15 @@ syscall_exit:
14522 testl $_TIF_ALLWORK_MASK, %ecx # current->work
14523 jne syscall_exit_work
14524
14525 +restore_all_pax:
14526 +
14527 +#ifdef CONFIG_PAX_RANDKSTACK
14528 + movl %esp, %eax
14529 + call pax_randomize_kstack
14530 +#endif
14531 +
14532 + pax_erase_kstack
14533 +
14534 restore_all:
14535 TRACE_IRQS_IRET
14536 restore_all_notrace:
14537 @@ -578,14 +769,34 @@ ldt_ss:
14538 * compensating for the offset by changing to the ESPFIX segment with
14539 * a base address that matches for the difference.
14540 */
14541 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
14542 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
14543 mov %esp, %edx /* load kernel esp */
14544 mov PT_OLDESP(%esp), %eax /* load userspace esp */
14545 mov %dx, %ax /* eax: new kernel esp */
14546 sub %eax, %edx /* offset (low word is 0) */
14547 +#ifdef CONFIG_SMP
14548 + movl PER_CPU_VAR(cpu_number), %ebx
14549 + shll $PAGE_SHIFT_asm, %ebx
14550 + addl $cpu_gdt_table, %ebx
14551 +#else
14552 + movl $cpu_gdt_table, %ebx
14553 +#endif
14554 shr $16, %edx
14555 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
14556 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
14557 +
14558 +#ifdef CONFIG_PAX_KERNEXEC
14559 + mov %cr0, %esi
14560 + btr $16, %esi
14561 + mov %esi, %cr0
14562 +#endif
14563 +
14564 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
14565 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
14566 +
14567 +#ifdef CONFIG_PAX_KERNEXEC
14568 + bts $16, %esi
14569 + mov %esi, %cr0
14570 +#endif
14571 +
14572 pushl_cfi $__ESPFIX_SS
14573 pushl_cfi %eax /* new kernel esp */
14574 /* Disable interrupts, but do not irqtrace this section: we
14575 @@ -614,34 +825,28 @@ work_resched:
14576 movl TI_flags(%ebp), %ecx
14577 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
14578 # than syscall tracing?
14579 - jz restore_all
14580 + jz restore_all_pax
14581 testb $_TIF_NEED_RESCHED, %cl
14582 jnz work_resched
14583
14584 work_notifysig: # deal with pending signals and
14585 # notify-resume requests
14586 + movl %esp, %eax
14587 #ifdef CONFIG_VM86
14588 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
14589 - movl %esp, %eax
14590 - jne work_notifysig_v86 # returning to kernel-space or
14591 + jz 1f # returning to kernel-space or
14592 # vm86-space
14593 - xorl %edx, %edx
14594 - call do_notify_resume
14595 - jmp resume_userspace_sig
14596
14597 - ALIGN
14598 -work_notifysig_v86:
14599 pushl_cfi %ecx # save ti_flags for do_notify_resume
14600 call save_v86_state # %eax contains pt_regs pointer
14601 popl_cfi %ecx
14602 movl %eax, %esp
14603 -#else
14604 - movl %esp, %eax
14605 +1:
14606 #endif
14607 xorl %edx, %edx
14608 call do_notify_resume
14609 jmp resume_userspace_sig
14610 -END(work_pending)
14611 +ENDPROC(work_pending)
14612
14613 # perform syscall exit tracing
14614 ALIGN
14615 @@ -649,11 +854,14 @@ syscall_trace_entry:
14616 movl $-ENOSYS,PT_EAX(%esp)
14617 movl %esp, %eax
14618 call syscall_trace_enter
14619 +
14620 + pax_erase_kstack
14621 +
14622 /* What it returned is what we'll actually use. */
14623 cmpl $(nr_syscalls), %eax
14624 jnae syscall_call
14625 jmp syscall_exit
14626 -END(syscall_trace_entry)
14627 +ENDPROC(syscall_trace_entry)
14628
14629 # perform syscall exit tracing
14630 ALIGN
14631 @@ -666,20 +874,24 @@ syscall_exit_work:
14632 movl %esp, %eax
14633 call syscall_trace_leave
14634 jmp resume_userspace
14635 -END(syscall_exit_work)
14636 +ENDPROC(syscall_exit_work)
14637 CFI_ENDPROC
14638
14639 RING0_INT_FRAME # can't unwind into user space anyway
14640 syscall_fault:
14641 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14642 + push %ss
14643 + pop %ds
14644 +#endif
14645 GET_THREAD_INFO(%ebp)
14646 movl $-EFAULT,PT_EAX(%esp)
14647 jmp resume_userspace
14648 -END(syscall_fault)
14649 +ENDPROC(syscall_fault)
14650
14651 syscall_badsys:
14652 movl $-ENOSYS,PT_EAX(%esp)
14653 jmp resume_userspace
14654 -END(syscall_badsys)
14655 +ENDPROC(syscall_badsys)
14656 CFI_ENDPROC
14657 /*
14658 * End of kprobes section
14659 @@ -753,6 +965,36 @@ ptregs_clone:
14660 CFI_ENDPROC
14661 ENDPROC(ptregs_clone)
14662
14663 + ALIGN;
14664 +ENTRY(kernel_execve)
14665 + CFI_STARTPROC
14666 + pushl_cfi %ebp
14667 + sub $PT_OLDSS+4,%esp
14668 + pushl_cfi %edi
14669 + pushl_cfi %ecx
14670 + pushl_cfi %eax
14671 + lea 3*4(%esp),%edi
14672 + mov $PT_OLDSS/4+1,%ecx
14673 + xorl %eax,%eax
14674 + rep stosl
14675 + popl_cfi %eax
14676 + popl_cfi %ecx
14677 + popl_cfi %edi
14678 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
14679 + pushl_cfi %esp
14680 + call sys_execve
14681 + add $4,%esp
14682 + CFI_ADJUST_CFA_OFFSET -4
14683 + GET_THREAD_INFO(%ebp)
14684 + test %eax,%eax
14685 + jz syscall_exit
14686 + add $PT_OLDSS+4,%esp
14687 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
14688 + popl_cfi %ebp
14689 + ret
14690 + CFI_ENDPROC
14691 +ENDPROC(kernel_execve)
14692 +
14693 .macro FIXUP_ESPFIX_STACK
14694 /*
14695 * Switch back for ESPFIX stack to the normal zerobased stack
14696 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
14697 * normal stack and adjusts ESP with the matching offset.
14698 */
14699 /* fixup the stack */
14700 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
14701 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
14702 +#ifdef CONFIG_SMP
14703 + movl PER_CPU_VAR(cpu_number), %ebx
14704 + shll $PAGE_SHIFT_asm, %ebx
14705 + addl $cpu_gdt_table, %ebx
14706 +#else
14707 + movl $cpu_gdt_table, %ebx
14708 +#endif
14709 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
14710 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
14711 shl $16, %eax
14712 addl %esp, %eax /* the adjusted stack pointer */
14713 pushl_cfi $__KERNEL_DS
14714 @@ -816,7 +1065,7 @@ vector=vector+1
14715 .endr
14716 2: jmp common_interrupt
14717 .endr
14718 -END(irq_entries_start)
14719 +ENDPROC(irq_entries_start)
14720
14721 .previous
14722 END(interrupt)
14723 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
14724 pushl_cfi $do_coprocessor_error
14725 jmp error_code
14726 CFI_ENDPROC
14727 -END(coprocessor_error)
14728 +ENDPROC(coprocessor_error)
14729
14730 ENTRY(simd_coprocessor_error)
14731 RING0_INT_FRAME
14732 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
14733 #endif
14734 jmp error_code
14735 CFI_ENDPROC
14736 -END(simd_coprocessor_error)
14737 +ENDPROC(simd_coprocessor_error)
14738
14739 ENTRY(device_not_available)
14740 RING0_INT_FRAME
14741 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
14742 pushl_cfi $do_device_not_available
14743 jmp error_code
14744 CFI_ENDPROC
14745 -END(device_not_available)
14746 +ENDPROC(device_not_available)
14747
14748 #ifdef CONFIG_PARAVIRT
14749 ENTRY(native_iret)
14750 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
14751 .align 4
14752 .long native_iret, iret_exc
14753 .previous
14754 -END(native_iret)
14755 +ENDPROC(native_iret)
14756
14757 ENTRY(native_irq_enable_sysexit)
14758 sti
14759 sysexit
14760 -END(native_irq_enable_sysexit)
14761 +ENDPROC(native_irq_enable_sysexit)
14762 #endif
14763
14764 ENTRY(overflow)
14765 @@ -916,7 +1165,7 @@ ENTRY(overflow)
14766 pushl_cfi $do_overflow
14767 jmp error_code
14768 CFI_ENDPROC
14769 -END(overflow)
14770 +ENDPROC(overflow)
14771
14772 ENTRY(bounds)
14773 RING0_INT_FRAME
14774 @@ -924,7 +1173,7 @@ ENTRY(bounds)
14775 pushl_cfi $do_bounds
14776 jmp error_code
14777 CFI_ENDPROC
14778 -END(bounds)
14779 +ENDPROC(bounds)
14780
14781 ENTRY(invalid_op)
14782 RING0_INT_FRAME
14783 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
14784 pushl_cfi $do_invalid_op
14785 jmp error_code
14786 CFI_ENDPROC
14787 -END(invalid_op)
14788 +ENDPROC(invalid_op)
14789
14790 ENTRY(coprocessor_segment_overrun)
14791 RING0_INT_FRAME
14792 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
14793 pushl_cfi $do_coprocessor_segment_overrun
14794 jmp error_code
14795 CFI_ENDPROC
14796 -END(coprocessor_segment_overrun)
14797 +ENDPROC(coprocessor_segment_overrun)
14798
14799 ENTRY(invalid_TSS)
14800 RING0_EC_FRAME
14801 pushl_cfi $do_invalid_TSS
14802 jmp error_code
14803 CFI_ENDPROC
14804 -END(invalid_TSS)
14805 +ENDPROC(invalid_TSS)
14806
14807 ENTRY(segment_not_present)
14808 RING0_EC_FRAME
14809 pushl_cfi $do_segment_not_present
14810 jmp error_code
14811 CFI_ENDPROC
14812 -END(segment_not_present)
14813 +ENDPROC(segment_not_present)
14814
14815 ENTRY(stack_segment)
14816 RING0_EC_FRAME
14817 pushl_cfi $do_stack_segment
14818 jmp error_code
14819 CFI_ENDPROC
14820 -END(stack_segment)
14821 +ENDPROC(stack_segment)
14822
14823 ENTRY(alignment_check)
14824 RING0_EC_FRAME
14825 pushl_cfi $do_alignment_check
14826 jmp error_code
14827 CFI_ENDPROC
14828 -END(alignment_check)
14829 +ENDPROC(alignment_check)
14830
14831 ENTRY(divide_error)
14832 RING0_INT_FRAME
14833 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
14834 pushl_cfi $do_divide_error
14835 jmp error_code
14836 CFI_ENDPROC
14837 -END(divide_error)
14838 +ENDPROC(divide_error)
14839
14840 #ifdef CONFIG_X86_MCE
14841 ENTRY(machine_check)
14842 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
14843 pushl_cfi machine_check_vector
14844 jmp error_code
14845 CFI_ENDPROC
14846 -END(machine_check)
14847 +ENDPROC(machine_check)
14848 #endif
14849
14850 ENTRY(spurious_interrupt_bug)
14851 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
14852 pushl_cfi $do_spurious_interrupt_bug
14853 jmp error_code
14854 CFI_ENDPROC
14855 -END(spurious_interrupt_bug)
14856 +ENDPROC(spurious_interrupt_bug)
14857 /*
14858 * End of kprobes section
14859 */
14860 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
14861
14862 ENTRY(mcount)
14863 ret
14864 -END(mcount)
14865 +ENDPROC(mcount)
14866
14867 ENTRY(ftrace_caller)
14868 cmpl $0, function_trace_stop
14869 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
14870 .globl ftrace_stub
14871 ftrace_stub:
14872 ret
14873 -END(ftrace_caller)
14874 +ENDPROC(ftrace_caller)
14875
14876 #else /* ! CONFIG_DYNAMIC_FTRACE */
14877
14878 @@ -1174,7 +1423,7 @@ trace:
14879 popl %ecx
14880 popl %eax
14881 jmp ftrace_stub
14882 -END(mcount)
14883 +ENDPROC(mcount)
14884 #endif /* CONFIG_DYNAMIC_FTRACE */
14885 #endif /* CONFIG_FUNCTION_TRACER */
14886
14887 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
14888 popl %ecx
14889 popl %eax
14890 ret
14891 -END(ftrace_graph_caller)
14892 +ENDPROC(ftrace_graph_caller)
14893
14894 .globl return_to_handler
14895 return_to_handler:
14896 @@ -1209,7 +1458,6 @@ return_to_handler:
14897 jmp *%ecx
14898 #endif
14899
14900 -.section .rodata,"a"
14901 #include "syscall_table_32.S"
14902
14903 syscall_table_size=(.-sys_call_table)
14904 @@ -1255,15 +1503,18 @@ error_code:
14905 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
14906 REG_TO_PTGS %ecx
14907 SET_KERNEL_GS %ecx
14908 - movl $(__USER_DS), %ecx
14909 + movl $(__KERNEL_DS), %ecx
14910 movl %ecx, %ds
14911 movl %ecx, %es
14912 +
14913 + pax_enter_kernel
14914 +
14915 TRACE_IRQS_OFF
14916 movl %esp,%eax # pt_regs pointer
14917 call *%edi
14918 jmp ret_from_exception
14919 CFI_ENDPROC
14920 -END(page_fault)
14921 +ENDPROC(page_fault)
14922
14923 /*
14924 * Debug traps and NMI can happen at the one SYSENTER instruction
14925 @@ -1305,7 +1556,7 @@ debug_stack_correct:
14926 call do_debug
14927 jmp ret_from_exception
14928 CFI_ENDPROC
14929 -END(debug)
14930 +ENDPROC(debug)
14931
14932 /*
14933 * NMI is doubly nasty. It can happen _while_ we're handling
14934 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
14935 xorl %edx,%edx # zero error code
14936 movl %esp,%eax # pt_regs pointer
14937 call do_nmi
14938 +
14939 + pax_exit_kernel
14940 +
14941 jmp restore_all_notrace
14942 CFI_ENDPROC
14943
14944 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
14945 FIXUP_ESPFIX_STACK # %eax == %esp
14946 xorl %edx,%edx # zero error code
14947 call do_nmi
14948 +
14949 + pax_exit_kernel
14950 +
14951 RESTORE_REGS
14952 lss 12+4(%esp), %esp # back to espfix stack
14953 CFI_ADJUST_CFA_OFFSET -24
14954 jmp irq_return
14955 CFI_ENDPROC
14956 -END(nmi)
14957 +ENDPROC(nmi)
14958
14959 ENTRY(int3)
14960 RING0_INT_FRAME
14961 @@ -1395,14 +1652,14 @@ ENTRY(int3)
14962 call do_int3
14963 jmp ret_from_exception
14964 CFI_ENDPROC
14965 -END(int3)
14966 +ENDPROC(int3)
14967
14968 ENTRY(general_protection)
14969 RING0_EC_FRAME
14970 pushl_cfi $do_general_protection
14971 jmp error_code
14972 CFI_ENDPROC
14973 -END(general_protection)
14974 +ENDPROC(general_protection)
14975
14976 #ifdef CONFIG_KVM_GUEST
14977 ENTRY(async_page_fault)
14978 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
14979 pushl_cfi $do_async_page_fault
14980 jmp error_code
14981 CFI_ENDPROC
14982 -END(async_page_fault)
14983 +ENDPROC(async_page_fault)
14984 #endif
14985
14986 /*
14987 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
14988 index faf8d5e..4f16a68 100644
14989 --- a/arch/x86/kernel/entry_64.S
14990 +++ b/arch/x86/kernel/entry_64.S
14991 @@ -55,6 +55,8 @@
14992 #include <asm/paravirt.h>
14993 #include <asm/ftrace.h>
14994 #include <asm/percpu.h>
14995 +#include <asm/pgtable.h>
14996 +#include <asm/alternative-asm.h>
14997
14998 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14999 #include <linux/elf-em.h>
15000 @@ -68,8 +70,9 @@
15001 #ifdef CONFIG_FUNCTION_TRACER
15002 #ifdef CONFIG_DYNAMIC_FTRACE
15003 ENTRY(mcount)
15004 + pax_force_retaddr
15005 retq
15006 -END(mcount)
15007 +ENDPROC(mcount)
15008
15009 ENTRY(ftrace_caller)
15010 cmpl $0, function_trace_stop
15011 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
15012 #endif
15013
15014 GLOBAL(ftrace_stub)
15015 + pax_force_retaddr
15016 retq
15017 -END(ftrace_caller)
15018 +ENDPROC(ftrace_caller)
15019
15020 #else /* ! CONFIG_DYNAMIC_FTRACE */
15021 ENTRY(mcount)
15022 @@ -112,6 +116,7 @@ ENTRY(mcount)
15023 #endif
15024
15025 GLOBAL(ftrace_stub)
15026 + pax_force_retaddr
15027 retq
15028
15029 trace:
15030 @@ -121,12 +126,13 @@ trace:
15031 movq 8(%rbp), %rsi
15032 subq $MCOUNT_INSN_SIZE, %rdi
15033
15034 + pax_force_fptr ftrace_trace_function
15035 call *ftrace_trace_function
15036
15037 MCOUNT_RESTORE_FRAME
15038
15039 jmp ftrace_stub
15040 -END(mcount)
15041 +ENDPROC(mcount)
15042 #endif /* CONFIG_DYNAMIC_FTRACE */
15043 #endif /* CONFIG_FUNCTION_TRACER */
15044
15045 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
15046
15047 MCOUNT_RESTORE_FRAME
15048
15049 + pax_force_retaddr
15050 retq
15051 -END(ftrace_graph_caller)
15052 +ENDPROC(ftrace_graph_caller)
15053
15054 GLOBAL(return_to_handler)
15055 subq $24, %rsp
15056 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
15057 movq 8(%rsp), %rdx
15058 movq (%rsp), %rax
15059 addq $24, %rsp
15060 + pax_force_fptr %rdi
15061 jmp *%rdi
15062 #endif
15063
15064 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
15065 ENDPROC(native_usergs_sysret64)
15066 #endif /* CONFIG_PARAVIRT */
15067
15068 + .macro ljmpq sel, off
15069 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15070 + .byte 0x48; ljmp *1234f(%rip)
15071 + .pushsection .rodata
15072 + .align 16
15073 + 1234: .quad \off; .word \sel
15074 + .popsection
15075 +#else
15076 + pushq $\sel
15077 + pushq $\off
15078 + lretq
15079 +#endif
15080 + .endm
15081 +
15082 + .macro pax_enter_kernel
15083 + pax_set_fptr_mask
15084 +#ifdef CONFIG_PAX_KERNEXEC
15085 + call pax_enter_kernel
15086 +#endif
15087 + .endm
15088 +
15089 + .macro pax_exit_kernel
15090 +#ifdef CONFIG_PAX_KERNEXEC
15091 + call pax_exit_kernel
15092 +#endif
15093 + .endm
15094 +
15095 +#ifdef CONFIG_PAX_KERNEXEC
15096 +ENTRY(pax_enter_kernel)
15097 + pushq %rdi
15098 +
15099 +#ifdef CONFIG_PARAVIRT
15100 + PV_SAVE_REGS(CLBR_RDI)
15101 +#endif
15102 +
15103 + GET_CR0_INTO_RDI
15104 + bts $16,%rdi
15105 + jnc 3f
15106 + mov %cs,%edi
15107 + cmp $__KERNEL_CS,%edi
15108 + jnz 2f
15109 +1:
15110 +
15111 +#ifdef CONFIG_PARAVIRT
15112 + PV_RESTORE_REGS(CLBR_RDI)
15113 +#endif
15114 +
15115 + popq %rdi
15116 + pax_force_retaddr
15117 + retq
15118 +
15119 +2: ljmpq __KERNEL_CS,1f
15120 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15121 +4: SET_RDI_INTO_CR0
15122 + jmp 1b
15123 +ENDPROC(pax_enter_kernel)
15124 +
15125 +ENTRY(pax_exit_kernel)
15126 + pushq %rdi
15127 +
15128 +#ifdef CONFIG_PARAVIRT
15129 + PV_SAVE_REGS(CLBR_RDI)
15130 +#endif
15131 +
15132 + mov %cs,%rdi
15133 + cmp $__KERNEXEC_KERNEL_CS,%edi
15134 + jz 2f
15135 +1:
15136 +
15137 +#ifdef CONFIG_PARAVIRT
15138 + PV_RESTORE_REGS(CLBR_RDI);
15139 +#endif
15140 +
15141 + popq %rdi
15142 + pax_force_retaddr
15143 + retq
15144 +
15145 +2: GET_CR0_INTO_RDI
15146 + btr $16,%rdi
15147 + ljmpq __KERNEL_CS,3f
15148 +3: SET_RDI_INTO_CR0
15149 + jmp 1b
15150 +#ifdef CONFIG_PARAVIRT
15151 + PV_RESTORE_REGS(CLBR_RDI);
15152 +#endif
15153 +
15154 + popq %rdi
15155 + pax_force_retaddr
15156 + retq
15157 +ENDPROC(pax_exit_kernel)
15158 +#endif
15159 +
15160 + .macro pax_enter_kernel_user
15161 + pax_set_fptr_mask
15162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15163 + call pax_enter_kernel_user
15164 +#endif
15165 + .endm
15166 +
15167 + .macro pax_exit_kernel_user
15168 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15169 + call pax_exit_kernel_user
15170 +#endif
15171 +#ifdef CONFIG_PAX_RANDKSTACK
15172 + pushq %rax
15173 + call pax_randomize_kstack
15174 + popq %rax
15175 +#endif
15176 + .endm
15177 +
15178 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15179 +ENTRY(pax_enter_kernel_user)
15180 + pushq %rdi
15181 + pushq %rbx
15182 +
15183 +#ifdef CONFIG_PARAVIRT
15184 + PV_SAVE_REGS(CLBR_RDI)
15185 +#endif
15186 +
15187 + GET_CR3_INTO_RDI
15188 + mov %rdi,%rbx
15189 + add $__START_KERNEL_map,%rbx
15190 + sub phys_base(%rip),%rbx
15191 +
15192 +#ifdef CONFIG_PARAVIRT
15193 + pushq %rdi
15194 + cmpl $0, pv_info+PARAVIRT_enabled
15195 + jz 1f
15196 + i = 0
15197 + .rept USER_PGD_PTRS
15198 + mov i*8(%rbx),%rsi
15199 + mov $0,%sil
15200 + lea i*8(%rbx),%rdi
15201 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15202 + i = i + 1
15203 + .endr
15204 + jmp 2f
15205 +1:
15206 +#endif
15207 +
15208 + i = 0
15209 + .rept USER_PGD_PTRS
15210 + movb $0,i*8(%rbx)
15211 + i = i + 1
15212 + .endr
15213 +
15214 +#ifdef CONFIG_PARAVIRT
15215 +2: popq %rdi
15216 +#endif
15217 + SET_RDI_INTO_CR3
15218 +
15219 +#ifdef CONFIG_PAX_KERNEXEC
15220 + GET_CR0_INTO_RDI
15221 + bts $16,%rdi
15222 + SET_RDI_INTO_CR0
15223 +#endif
15224 +
15225 +#ifdef CONFIG_PARAVIRT
15226 + PV_RESTORE_REGS(CLBR_RDI)
15227 +#endif
15228 +
15229 + popq %rbx
15230 + popq %rdi
15231 + pax_force_retaddr
15232 + retq
15233 +ENDPROC(pax_enter_kernel_user)
15234 +
15235 +ENTRY(pax_exit_kernel_user)
15236 + push %rdi
15237 +
15238 +#ifdef CONFIG_PARAVIRT
15239 + pushq %rbx
15240 + PV_SAVE_REGS(CLBR_RDI)
15241 +#endif
15242 +
15243 +#ifdef CONFIG_PAX_KERNEXEC
15244 + GET_CR0_INTO_RDI
15245 + btr $16,%rdi
15246 + SET_RDI_INTO_CR0
15247 +#endif
15248 +
15249 + GET_CR3_INTO_RDI
15250 + add $__START_KERNEL_map,%rdi
15251 + sub phys_base(%rip),%rdi
15252 +
15253 +#ifdef CONFIG_PARAVIRT
15254 + cmpl $0, pv_info+PARAVIRT_enabled
15255 + jz 1f
15256 + mov %rdi,%rbx
15257 + i = 0
15258 + .rept USER_PGD_PTRS
15259 + mov i*8(%rbx),%rsi
15260 + mov $0x67,%sil
15261 + lea i*8(%rbx),%rdi
15262 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15263 + i = i + 1
15264 + .endr
15265 + jmp 2f
15266 +1:
15267 +#endif
15268 +
15269 + i = 0
15270 + .rept USER_PGD_PTRS
15271 + movb $0x67,i*8(%rdi)
15272 + i = i + 1
15273 + .endr
15274 +
15275 +#ifdef CONFIG_PARAVIRT
15276 +2: PV_RESTORE_REGS(CLBR_RDI)
15277 + popq %rbx
15278 +#endif
15279 +
15280 + popq %rdi
15281 + pax_force_retaddr
15282 + retq
15283 +ENDPROC(pax_exit_kernel_user)
15284 +#endif
15285 +
15286 +.macro pax_erase_kstack
15287 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15288 + call pax_erase_kstack
15289 +#endif
15290 +.endm
15291 +
15292 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15293 +/*
15294 + * r11: thread_info
15295 + * rcx, rdx: can be clobbered
15296 + */
15297 +ENTRY(pax_erase_kstack)
15298 + pushq %rdi
15299 + pushq %rax
15300 + pushq %r11
15301 +
15302 + GET_THREAD_INFO(%r11)
15303 + mov TI_lowest_stack(%r11), %rdi
15304 + mov $-0xBEEF, %rax
15305 + std
15306 +
15307 +1: mov %edi, %ecx
15308 + and $THREAD_SIZE_asm - 1, %ecx
15309 + shr $3, %ecx
15310 + repne scasq
15311 + jecxz 2f
15312 +
15313 + cmp $2*8, %ecx
15314 + jc 2f
15315 +
15316 + mov $2*8, %ecx
15317 + repe scasq
15318 + jecxz 2f
15319 + jne 1b
15320 +
15321 +2: cld
15322 + mov %esp, %ecx
15323 + sub %edi, %ecx
15324 +
15325 + cmp $THREAD_SIZE_asm, %rcx
15326 + jb 3f
15327 + ud2
15328 +3:
15329 +
15330 + shr $3, %ecx
15331 + rep stosq
15332 +
15333 + mov TI_task_thread_sp0(%r11), %rdi
15334 + sub $256, %rdi
15335 + mov %rdi, TI_lowest_stack(%r11)
15336 +
15337 + popq %r11
15338 + popq %rax
15339 + popq %rdi
15340 + pax_force_retaddr
15341 + ret
15342 +ENDPROC(pax_erase_kstack)
15343 +#endif
15344
15345 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15346 #ifdef CONFIG_TRACE_IRQFLAGS
15347 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
15348 .endm
15349
15350 .macro UNFAKE_STACK_FRAME
15351 - addq $8*6, %rsp
15352 - CFI_ADJUST_CFA_OFFSET -(6*8)
15353 + addq $8*6 + ARG_SKIP, %rsp
15354 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15355 .endm
15356
15357 /*
15358 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
15359 movq %rsp, %rsi
15360
15361 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15362 - testl $3, CS(%rdi)
15363 + testb $3, CS(%rdi)
15364 je 1f
15365 SWAPGS
15366 /*
15367 @@ -355,9 +639,10 @@ ENTRY(save_rest)
15368 movq_cfi r15, R15+16
15369 movq %r11, 8(%rsp) /* return address */
15370 FIXUP_TOP_OF_STACK %r11, 16
15371 + pax_force_retaddr
15372 ret
15373 CFI_ENDPROC
15374 -END(save_rest)
15375 +ENDPROC(save_rest)
15376
15377 /* save complete stack frame */
15378 .pushsection .kprobes.text, "ax"
15379 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
15380 js 1f /* negative -> in kernel */
15381 SWAPGS
15382 xorl %ebx,%ebx
15383 -1: ret
15384 +1: pax_force_retaddr_bts
15385 + ret
15386 CFI_ENDPROC
15387 -END(save_paranoid)
15388 +ENDPROC(save_paranoid)
15389 .popsection
15390
15391 /*
15392 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
15393
15394 RESTORE_REST
15395
15396 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15397 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15398 je int_ret_from_sys_call
15399
15400 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15401 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
15402 jmp ret_from_sys_call # go to the SYSRET fastpath
15403
15404 CFI_ENDPROC
15405 -END(ret_from_fork)
15406 +ENDPROC(ret_from_fork)
15407
15408 /*
15409 * System call entry. Up to 6 arguments in registers are supported.
15410 @@ -456,7 +742,7 @@ END(ret_from_fork)
15411 ENTRY(system_call)
15412 CFI_STARTPROC simple
15413 CFI_SIGNAL_FRAME
15414 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15415 + CFI_DEF_CFA rsp,0
15416 CFI_REGISTER rip,rcx
15417 /*CFI_REGISTER rflags,r11*/
15418 SWAPGS_UNSAFE_STACK
15419 @@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
15420
15421 movq %rsp,PER_CPU_VAR(old_rsp)
15422 movq PER_CPU_VAR(kernel_stack),%rsp
15423 + SAVE_ARGS 8*6,0
15424 + pax_enter_kernel_user
15425 /*
15426 * No need to follow this irqs off/on section - it's straight
15427 * and short:
15428 */
15429 ENABLE_INTERRUPTS(CLBR_NONE)
15430 - SAVE_ARGS 8,0
15431 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15432 movq %rcx,RIP-ARGOFFSET(%rsp)
15433 CFI_REL_OFFSET rip,RIP-ARGOFFSET
15434 @@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
15435 system_call_fastpath:
15436 cmpq $__NR_syscall_max,%rax
15437 ja badsys
15438 - movq %r10,%rcx
15439 + movq R10-ARGOFFSET(%rsp),%rcx
15440 call *sys_call_table(,%rax,8) # XXX: rip relative
15441 movq %rax,RAX-ARGOFFSET(%rsp)
15442 /*
15443 @@ -503,6 +790,8 @@ sysret_check:
15444 andl %edi,%edx
15445 jnz sysret_careful
15446 CFI_REMEMBER_STATE
15447 + pax_exit_kernel_user
15448 + pax_erase_kstack
15449 /*
15450 * sysretq will re-enable interrupts:
15451 */
15452 @@ -554,14 +843,18 @@ badsys:
15453 * jump back to the normal fast path.
15454 */
15455 auditsys:
15456 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
15457 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
15458 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
15459 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
15460 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
15461 movq %rax,%rsi /* 2nd arg: syscall number */
15462 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
15463 call audit_syscall_entry
15464 +
15465 + pax_erase_kstack
15466 +
15467 LOAD_ARGS 0 /* reload call-clobbered registers */
15468 + pax_set_fptr_mask
15469 jmp system_call_fastpath
15470
15471 /*
15472 @@ -591,16 +884,20 @@ tracesys:
15473 FIXUP_TOP_OF_STACK %rdi
15474 movq %rsp,%rdi
15475 call syscall_trace_enter
15476 +
15477 + pax_erase_kstack
15478 +
15479 /*
15480 * Reload arg registers from stack in case ptrace changed them.
15481 * We don't reload %rax because syscall_trace_enter() returned
15482 * the value it wants us to use in the table lookup.
15483 */
15484 LOAD_ARGS ARGOFFSET, 1
15485 + pax_set_fptr_mask
15486 RESTORE_REST
15487 cmpq $__NR_syscall_max,%rax
15488 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
15489 - movq %r10,%rcx /* fixup for C */
15490 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
15491 call *sys_call_table(,%rax,8)
15492 movq %rax,RAX-ARGOFFSET(%rsp)
15493 /* Use IRET because user could have changed frame */
15494 @@ -612,7 +909,7 @@ tracesys:
15495 GLOBAL(int_ret_from_sys_call)
15496 DISABLE_INTERRUPTS(CLBR_NONE)
15497 TRACE_IRQS_OFF
15498 - testl $3,CS-ARGOFFSET(%rsp)
15499 + testb $3,CS-ARGOFFSET(%rsp)
15500 je retint_restore_args
15501 movl $_TIF_ALLWORK_MASK,%edi
15502 /* edi: mask to check */
15503 @@ -623,6 +920,7 @@ GLOBAL(int_with_check)
15504 andl %edi,%edx
15505 jnz int_careful
15506 andl $~TS_COMPAT,TI_status(%rcx)
15507 + pax_erase_kstack
15508 jmp retint_swapgs
15509
15510 /* Either reschedule or signal or syscall exit tracking needed. */
15511 @@ -669,7 +967,7 @@ int_restore_rest:
15512 TRACE_IRQS_OFF
15513 jmp int_with_check
15514 CFI_ENDPROC
15515 -END(system_call)
15516 +ENDPROC(system_call)
15517
15518 /*
15519 * Certain special system calls that need to save a complete full stack frame.
15520 @@ -685,7 +983,7 @@ ENTRY(\label)
15521 call \func
15522 jmp ptregscall_common
15523 CFI_ENDPROC
15524 -END(\label)
15525 +ENDPROC(\label)
15526 .endm
15527
15528 PTREGSCALL stub_clone, sys_clone, %r8
15529 @@ -703,9 +1001,10 @@ ENTRY(ptregscall_common)
15530 movq_cfi_restore R12+8, r12
15531 movq_cfi_restore RBP+8, rbp
15532 movq_cfi_restore RBX+8, rbx
15533 + pax_force_retaddr
15534 ret $REST_SKIP /* pop extended registers */
15535 CFI_ENDPROC
15536 -END(ptregscall_common)
15537 +ENDPROC(ptregscall_common)
15538
15539 ENTRY(stub_execve)
15540 CFI_STARTPROC
15541 @@ -720,7 +1019,7 @@ ENTRY(stub_execve)
15542 RESTORE_REST
15543 jmp int_ret_from_sys_call
15544 CFI_ENDPROC
15545 -END(stub_execve)
15546 +ENDPROC(stub_execve)
15547
15548 /*
15549 * sigreturn is special because it needs to restore all registers on return.
15550 @@ -738,7 +1037,7 @@ ENTRY(stub_rt_sigreturn)
15551 RESTORE_REST
15552 jmp int_ret_from_sys_call
15553 CFI_ENDPROC
15554 -END(stub_rt_sigreturn)
15555 +ENDPROC(stub_rt_sigreturn)
15556
15557 /*
15558 * Build the entry stubs and pointer table with some assembler magic.
15559 @@ -773,7 +1072,7 @@ vector=vector+1
15560 2: jmp common_interrupt
15561 .endr
15562 CFI_ENDPROC
15563 -END(irq_entries_start)
15564 +ENDPROC(irq_entries_start)
15565
15566 .previous
15567 END(interrupt)
15568 @@ -793,6 +1092,16 @@ END(interrupt)
15569 subq $ORIG_RAX-RBP, %rsp
15570 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
15571 SAVE_ARGS_IRQ
15572 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15573 + testb $3, CS(%rdi)
15574 + jnz 1f
15575 + pax_enter_kernel
15576 + jmp 2f
15577 +1: pax_enter_kernel_user
15578 +2:
15579 +#else
15580 + pax_enter_kernel
15581 +#endif
15582 call \func
15583 .endm
15584
15585 @@ -824,7 +1133,7 @@ ret_from_intr:
15586
15587 exit_intr:
15588 GET_THREAD_INFO(%rcx)
15589 - testl $3,CS-ARGOFFSET(%rsp)
15590 + testb $3,CS-ARGOFFSET(%rsp)
15591 je retint_kernel
15592
15593 /* Interrupt came from user space */
15594 @@ -846,12 +1155,15 @@ retint_swapgs: /* return to user-space */
15595 * The iretq could re-enable interrupts:
15596 */
15597 DISABLE_INTERRUPTS(CLBR_ANY)
15598 + pax_exit_kernel_user
15599 TRACE_IRQS_IRETQ
15600 SWAPGS
15601 jmp restore_args
15602
15603 retint_restore_args: /* return to kernel space */
15604 DISABLE_INTERRUPTS(CLBR_ANY)
15605 + pax_exit_kernel
15606 + pax_force_retaddr RIP-ARGOFFSET
15607 /*
15608 * The iretq could re-enable interrupts:
15609 */
15610 @@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
15611 #endif
15612
15613 CFI_ENDPROC
15614 -END(common_interrupt)
15615 +ENDPROC(common_interrupt)
15616 /*
15617 * End of kprobes section
15618 */
15619 @@ -956,7 +1268,7 @@ ENTRY(\sym)
15620 interrupt \do_sym
15621 jmp ret_from_intr
15622 CFI_ENDPROC
15623 -END(\sym)
15624 +ENDPROC(\sym)
15625 .endm
15626
15627 #ifdef CONFIG_SMP
15628 @@ -1021,12 +1333,22 @@ ENTRY(\sym)
15629 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15630 call error_entry
15631 DEFAULT_FRAME 0
15632 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15633 + testb $3, CS(%rsp)
15634 + jnz 1f
15635 + pax_enter_kernel
15636 + jmp 2f
15637 +1: pax_enter_kernel_user
15638 +2:
15639 +#else
15640 + pax_enter_kernel
15641 +#endif
15642 movq %rsp,%rdi /* pt_regs pointer */
15643 xorl %esi,%esi /* no error code */
15644 call \do_sym
15645 jmp error_exit /* %ebx: no swapgs flag */
15646 CFI_ENDPROC
15647 -END(\sym)
15648 +ENDPROC(\sym)
15649 .endm
15650
15651 .macro paranoidzeroentry sym do_sym
15652 @@ -1038,15 +1360,25 @@ ENTRY(\sym)
15653 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15654 call save_paranoid
15655 TRACE_IRQS_OFF
15656 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15657 + testb $3, CS(%rsp)
15658 + jnz 1f
15659 + pax_enter_kernel
15660 + jmp 2f
15661 +1: pax_enter_kernel_user
15662 +2:
15663 +#else
15664 + pax_enter_kernel
15665 +#endif
15666 movq %rsp,%rdi /* pt_regs pointer */
15667 xorl %esi,%esi /* no error code */
15668 call \do_sym
15669 jmp paranoid_exit /* %ebx: no swapgs flag */
15670 CFI_ENDPROC
15671 -END(\sym)
15672 +ENDPROC(\sym)
15673 .endm
15674
15675 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
15676 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
15677 .macro paranoidzeroentry_ist sym do_sym ist
15678 ENTRY(\sym)
15679 INTR_FRAME
15680 @@ -1056,14 +1388,30 @@ ENTRY(\sym)
15681 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15682 call save_paranoid
15683 TRACE_IRQS_OFF
15684 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15685 + testb $3, CS(%rsp)
15686 + jnz 1f
15687 + pax_enter_kernel
15688 + jmp 2f
15689 +1: pax_enter_kernel_user
15690 +2:
15691 +#else
15692 + pax_enter_kernel
15693 +#endif
15694 movq %rsp,%rdi /* pt_regs pointer */
15695 xorl %esi,%esi /* no error code */
15696 +#ifdef CONFIG_SMP
15697 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
15698 + lea init_tss(%r12), %r12
15699 +#else
15700 + lea init_tss(%rip), %r12
15701 +#endif
15702 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15703 call \do_sym
15704 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15705 jmp paranoid_exit /* %ebx: no swapgs flag */
15706 CFI_ENDPROC
15707 -END(\sym)
15708 +ENDPROC(\sym)
15709 .endm
15710
15711 .macro errorentry sym do_sym
15712 @@ -1074,13 +1422,23 @@ ENTRY(\sym)
15713 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15714 call error_entry
15715 DEFAULT_FRAME 0
15716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15717 + testb $3, CS(%rsp)
15718 + jnz 1f
15719 + pax_enter_kernel
15720 + jmp 2f
15721 +1: pax_enter_kernel_user
15722 +2:
15723 +#else
15724 + pax_enter_kernel
15725 +#endif
15726 movq %rsp,%rdi /* pt_regs pointer */
15727 movq ORIG_RAX(%rsp),%rsi /* get error code */
15728 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15729 call \do_sym
15730 jmp error_exit /* %ebx: no swapgs flag */
15731 CFI_ENDPROC
15732 -END(\sym)
15733 +ENDPROC(\sym)
15734 .endm
15735
15736 /* error code is on the stack already */
15737 @@ -1093,13 +1451,23 @@ ENTRY(\sym)
15738 call save_paranoid
15739 DEFAULT_FRAME 0
15740 TRACE_IRQS_OFF
15741 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15742 + testb $3, CS(%rsp)
15743 + jnz 1f
15744 + pax_enter_kernel
15745 + jmp 2f
15746 +1: pax_enter_kernel_user
15747 +2:
15748 +#else
15749 + pax_enter_kernel
15750 +#endif
15751 movq %rsp,%rdi /* pt_regs pointer */
15752 movq ORIG_RAX(%rsp),%rsi /* get error code */
15753 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15754 call \do_sym
15755 jmp paranoid_exit /* %ebx: no swapgs flag */
15756 CFI_ENDPROC
15757 -END(\sym)
15758 +ENDPROC(\sym)
15759 .endm
15760
15761 zeroentry divide_error do_divide_error
15762 @@ -1129,9 +1497,10 @@ gs_change:
15763 2: mfence /* workaround */
15764 SWAPGS
15765 popfq_cfi
15766 + pax_force_retaddr
15767 ret
15768 CFI_ENDPROC
15769 -END(native_load_gs_index)
15770 +ENDPROC(native_load_gs_index)
15771
15772 .section __ex_table,"a"
15773 .align 8
15774 @@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
15775 * Here we are in the child and the registers are set as they were
15776 * at kernel_thread() invocation in the parent.
15777 */
15778 + pax_force_fptr %rsi
15779 call *%rsi
15780 # exit
15781 mov %eax, %edi
15782 call do_exit
15783 ud2 # padding for call trace
15784 CFI_ENDPROC
15785 -END(kernel_thread_helper)
15786 +ENDPROC(kernel_thread_helper)
15787
15788 /*
15789 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
15790 @@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
15791 RESTORE_REST
15792 testq %rax,%rax
15793 je int_ret_from_sys_call
15794 - RESTORE_ARGS
15795 UNFAKE_STACK_FRAME
15796 + pax_force_retaddr
15797 ret
15798 CFI_ENDPROC
15799 -END(kernel_execve)
15800 +ENDPROC(kernel_execve)
15801
15802 /* Call softirq on interrupt stack. Interrupts are off. */
15803 ENTRY(call_softirq)
15804 @@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
15805 CFI_DEF_CFA_REGISTER rsp
15806 CFI_ADJUST_CFA_OFFSET -8
15807 decl PER_CPU_VAR(irq_count)
15808 + pax_force_retaddr
15809 ret
15810 CFI_ENDPROC
15811 -END(call_softirq)
15812 +ENDPROC(call_softirq)
15813
15814 #ifdef CONFIG_XEN
15815 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
15816 @@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
15817 decl PER_CPU_VAR(irq_count)
15818 jmp error_exit
15819 CFI_ENDPROC
15820 -END(xen_do_hypervisor_callback)
15821 +ENDPROC(xen_do_hypervisor_callback)
15822
15823 /*
15824 * Hypervisor uses this for application faults while it executes.
15825 @@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
15826 SAVE_ALL
15827 jmp error_exit
15828 CFI_ENDPROC
15829 -END(xen_failsafe_callback)
15830 +ENDPROC(xen_failsafe_callback)
15831
15832 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
15833 xen_hvm_callback_vector xen_evtchn_do_upcall
15834 @@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
15835 TRACE_IRQS_OFF
15836 testl %ebx,%ebx /* swapgs needed? */
15837 jnz paranoid_restore
15838 - testl $3,CS(%rsp)
15839 + testb $3,CS(%rsp)
15840 jnz paranoid_userspace
15841 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15842 + pax_exit_kernel
15843 + TRACE_IRQS_IRETQ 0
15844 + SWAPGS_UNSAFE_STACK
15845 + RESTORE_ALL 8
15846 + pax_force_retaddr_bts
15847 + jmp irq_return
15848 +#endif
15849 paranoid_swapgs:
15850 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15851 + pax_exit_kernel_user
15852 +#else
15853 + pax_exit_kernel
15854 +#endif
15855 TRACE_IRQS_IRETQ 0
15856 SWAPGS_UNSAFE_STACK
15857 RESTORE_ALL 8
15858 jmp irq_return
15859 paranoid_restore:
15860 + pax_exit_kernel
15861 TRACE_IRQS_IRETQ 0
15862 RESTORE_ALL 8
15863 + pax_force_retaddr_bts
15864 jmp irq_return
15865 paranoid_userspace:
15866 GET_THREAD_INFO(%rcx)
15867 @@ -1394,7 +1780,7 @@ paranoid_schedule:
15868 TRACE_IRQS_OFF
15869 jmp paranoid_userspace
15870 CFI_ENDPROC
15871 -END(paranoid_exit)
15872 +ENDPROC(paranoid_exit)
15873
15874 /*
15875 * Exception entry point. This expects an error code/orig_rax on the stack.
15876 @@ -1421,12 +1807,13 @@ ENTRY(error_entry)
15877 movq_cfi r14, R14+8
15878 movq_cfi r15, R15+8
15879 xorl %ebx,%ebx
15880 - testl $3,CS+8(%rsp)
15881 + testb $3,CS+8(%rsp)
15882 je error_kernelspace
15883 error_swapgs:
15884 SWAPGS
15885 error_sti:
15886 TRACE_IRQS_OFF
15887 + pax_force_retaddr_bts
15888 ret
15889
15890 /*
15891 @@ -1453,7 +1840,7 @@ bstep_iret:
15892 movq %rcx,RIP+8(%rsp)
15893 jmp error_swapgs
15894 CFI_ENDPROC
15895 -END(error_entry)
15896 +ENDPROC(error_entry)
15897
15898
15899 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
15900 @@ -1473,7 +1860,7 @@ ENTRY(error_exit)
15901 jnz retint_careful
15902 jmp retint_swapgs
15903 CFI_ENDPROC
15904 -END(error_exit)
15905 +ENDPROC(error_exit)
15906
15907
15908 /* runs on exception stack */
15909 @@ -1485,6 +1872,16 @@ ENTRY(nmi)
15910 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15911 call save_paranoid
15912 DEFAULT_FRAME 0
15913 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15914 + testb $3, CS(%rsp)
15915 + jnz 1f
15916 + pax_enter_kernel
15917 + jmp 2f
15918 +1: pax_enter_kernel_user
15919 +2:
15920 +#else
15921 + pax_enter_kernel
15922 +#endif
15923 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
15924 movq %rsp,%rdi
15925 movq $-1,%rsi
15926 @@ -1495,12 +1892,28 @@ ENTRY(nmi)
15927 DISABLE_INTERRUPTS(CLBR_NONE)
15928 testl %ebx,%ebx /* swapgs needed? */
15929 jnz nmi_restore
15930 - testl $3,CS(%rsp)
15931 + testb $3,CS(%rsp)
15932 jnz nmi_userspace
15933 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15934 + pax_exit_kernel
15935 + SWAPGS_UNSAFE_STACK
15936 + RESTORE_ALL 8
15937 + pax_force_retaddr_bts
15938 + jmp irq_return
15939 +#endif
15940 nmi_swapgs:
15941 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15942 + pax_exit_kernel_user
15943 +#else
15944 + pax_exit_kernel
15945 +#endif
15946 SWAPGS_UNSAFE_STACK
15947 + RESTORE_ALL 8
15948 + jmp irq_return
15949 nmi_restore:
15950 + pax_exit_kernel
15951 RESTORE_ALL 8
15952 + pax_force_retaddr_bts
15953 jmp irq_return
15954 nmi_userspace:
15955 GET_THREAD_INFO(%rcx)
15956 @@ -1529,14 +1942,14 @@ nmi_schedule:
15957 jmp paranoid_exit
15958 CFI_ENDPROC
15959 #endif
15960 -END(nmi)
15961 +ENDPROC(nmi)
15962
15963 ENTRY(ignore_sysret)
15964 CFI_STARTPROC
15965 mov $-ENOSYS,%eax
15966 sysret
15967 CFI_ENDPROC
15968 -END(ignore_sysret)
15969 +ENDPROC(ignore_sysret)
15970
15971 /*
15972 * End of kprobes section
15973 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
15974 index c9a281f..ce2f317 100644
15975 --- a/arch/x86/kernel/ftrace.c
15976 +++ b/arch/x86/kernel/ftrace.c
15977 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
15978 static const void *mod_code_newcode; /* holds the text to write to the IP */
15979
15980 static unsigned nmi_wait_count;
15981 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
15982 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
15983
15984 int ftrace_arch_read_dyn_info(char *buf, int size)
15985 {
15986 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
15987
15988 r = snprintf(buf, size, "%u %u",
15989 nmi_wait_count,
15990 - atomic_read(&nmi_update_count));
15991 + atomic_read_unchecked(&nmi_update_count));
15992 return r;
15993 }
15994
15995 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
15996
15997 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
15998 smp_rmb();
15999 + pax_open_kernel();
16000 ftrace_mod_code();
16001 - atomic_inc(&nmi_update_count);
16002 + pax_close_kernel();
16003 + atomic_inc_unchecked(&nmi_update_count);
16004 }
16005 /* Must have previous changes seen before executions */
16006 smp_mb();
16007 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16008 {
16009 unsigned char replaced[MCOUNT_INSN_SIZE];
16010
16011 + ip = ktla_ktva(ip);
16012 +
16013 /*
16014 * Note: Due to modules and __init, code can
16015 * disappear and change, we need to protect against faulting
16016 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16017 unsigned char old[MCOUNT_INSN_SIZE], *new;
16018 int ret;
16019
16020 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16021 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16022 new = ftrace_call_replace(ip, (unsigned long)func);
16023 ret = ftrace_modify_code(ip, old, new);
16024
16025 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16026 {
16027 unsigned char code[MCOUNT_INSN_SIZE];
16028
16029 + ip = ktla_ktva(ip);
16030 +
16031 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16032 return -EFAULT;
16033
16034 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16035 index 3bb0850..55a56f4 100644
16036 --- a/arch/x86/kernel/head32.c
16037 +++ b/arch/x86/kernel/head32.c
16038 @@ -19,6 +19,7 @@
16039 #include <asm/io_apic.h>
16040 #include <asm/bios_ebda.h>
16041 #include <asm/tlbflush.h>
16042 +#include <asm/boot.h>
16043
16044 static void __init i386_default_early_setup(void)
16045 {
16046 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
16047 {
16048 memblock_init();
16049
16050 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16051 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16052
16053 #ifdef CONFIG_BLK_DEV_INITRD
16054 /* Reserve INITRD */
16055 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16056 index ce0be7c..c41476e 100644
16057 --- a/arch/x86/kernel/head_32.S
16058 +++ b/arch/x86/kernel/head_32.S
16059 @@ -25,6 +25,12 @@
16060 /* Physical address */
16061 #define pa(X) ((X) - __PAGE_OFFSET)
16062
16063 +#ifdef CONFIG_PAX_KERNEXEC
16064 +#define ta(X) (X)
16065 +#else
16066 +#define ta(X) ((X) - __PAGE_OFFSET)
16067 +#endif
16068 +
16069 /*
16070 * References to members of the new_cpu_data structure.
16071 */
16072 @@ -54,11 +60,7 @@
16073 * and small than max_low_pfn, otherwise will waste some page table entries
16074 */
16075
16076 -#if PTRS_PER_PMD > 1
16077 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16078 -#else
16079 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16080 -#endif
16081 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16082
16083 /* Number of possible pages in the lowmem region */
16084 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16085 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16086 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16087
16088 /*
16089 + * Real beginning of normal "text" segment
16090 + */
16091 +ENTRY(stext)
16092 +ENTRY(_stext)
16093 +
16094 +/*
16095 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16096 * %esi points to the real-mode code as a 32-bit pointer.
16097 * CS and DS must be 4 GB flat segments, but we don't depend on
16098 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16099 * can.
16100 */
16101 __HEAD
16102 +
16103 +#ifdef CONFIG_PAX_KERNEXEC
16104 + jmp startup_32
16105 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16106 +.fill PAGE_SIZE-5,1,0xcc
16107 +#endif
16108 +
16109 ENTRY(startup_32)
16110 movl pa(stack_start),%ecx
16111
16112 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16113 2:
16114 leal -__PAGE_OFFSET(%ecx),%esp
16115
16116 +#ifdef CONFIG_SMP
16117 + movl $pa(cpu_gdt_table),%edi
16118 + movl $__per_cpu_load,%eax
16119 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16120 + rorl $16,%eax
16121 + movb %al,__KERNEL_PERCPU + 4(%edi)
16122 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16123 + movl $__per_cpu_end - 1,%eax
16124 + subl $__per_cpu_start,%eax
16125 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16126 +#endif
16127 +
16128 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16129 + movl $NR_CPUS,%ecx
16130 + movl $pa(cpu_gdt_table),%edi
16131 +1:
16132 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16133 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16134 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16135 + addl $PAGE_SIZE_asm,%edi
16136 + loop 1b
16137 +#endif
16138 +
16139 +#ifdef CONFIG_PAX_KERNEXEC
16140 + movl $pa(boot_gdt),%edi
16141 + movl $__LOAD_PHYSICAL_ADDR,%eax
16142 + movw %ax,__BOOT_CS + 2(%edi)
16143 + rorl $16,%eax
16144 + movb %al,__BOOT_CS + 4(%edi)
16145 + movb %ah,__BOOT_CS + 7(%edi)
16146 + rorl $16,%eax
16147 +
16148 + ljmp $(__BOOT_CS),$1f
16149 +1:
16150 +
16151 + movl $NR_CPUS,%ecx
16152 + movl $pa(cpu_gdt_table),%edi
16153 + addl $__PAGE_OFFSET,%eax
16154 +1:
16155 + movw %ax,__KERNEL_CS + 2(%edi)
16156 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16157 + rorl $16,%eax
16158 + movb %al,__KERNEL_CS + 4(%edi)
16159 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16160 + movb %ah,__KERNEL_CS + 7(%edi)
16161 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16162 + rorl $16,%eax
16163 + addl $PAGE_SIZE_asm,%edi
16164 + loop 1b
16165 +#endif
16166 +
16167 /*
16168 * Clear BSS first so that there are no surprises...
16169 */
16170 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16171 movl %eax, pa(max_pfn_mapped)
16172
16173 /* Do early initialization of the fixmap area */
16174 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16175 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16176 +#ifdef CONFIG_COMPAT_VDSO
16177 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16178 +#else
16179 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16180 +#endif
16181 #else /* Not PAE */
16182
16183 page_pde_offset = (__PAGE_OFFSET >> 20);
16184 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16185 movl %eax, pa(max_pfn_mapped)
16186
16187 /* Do early initialization of the fixmap area */
16188 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16189 - movl %eax,pa(initial_page_table+0xffc)
16190 +#ifdef CONFIG_COMPAT_VDSO
16191 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16192 +#else
16193 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16194 +#endif
16195 #endif
16196
16197 #ifdef CONFIG_PARAVIRT
16198 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16199 cmpl $num_subarch_entries, %eax
16200 jae bad_subarch
16201
16202 - movl pa(subarch_entries)(,%eax,4), %eax
16203 - subl $__PAGE_OFFSET, %eax
16204 - jmp *%eax
16205 + jmp *pa(subarch_entries)(,%eax,4)
16206
16207 bad_subarch:
16208 WEAK(lguest_entry)
16209 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16210 __INITDATA
16211
16212 subarch_entries:
16213 - .long default_entry /* normal x86/PC */
16214 - .long lguest_entry /* lguest hypervisor */
16215 - .long xen_entry /* Xen hypervisor */
16216 - .long default_entry /* Moorestown MID */
16217 + .long ta(default_entry) /* normal x86/PC */
16218 + .long ta(lguest_entry) /* lguest hypervisor */
16219 + .long ta(xen_entry) /* Xen hypervisor */
16220 + .long ta(default_entry) /* Moorestown MID */
16221 num_subarch_entries = (. - subarch_entries) / 4
16222 .previous
16223 #else
16224 @@ -312,6 +382,7 @@ default_entry:
16225 orl %edx,%eax
16226 movl %eax,%cr4
16227
16228 +#ifdef CONFIG_X86_PAE
16229 testb $X86_CR4_PAE, %al # check if PAE is enabled
16230 jz 6f
16231
16232 @@ -340,6 +411,9 @@ default_entry:
16233 /* Make changes effective */
16234 wrmsr
16235
16236 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16237 +#endif
16238 +
16239 6:
16240
16241 /*
16242 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16243 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16244 movl %eax,%ss # after changing gdt.
16245
16246 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16247 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16248 movl %eax,%ds
16249 movl %eax,%es
16250
16251 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16252 */
16253 cmpb $0,ready
16254 jne 1f
16255 - movl $gdt_page,%eax
16256 + movl $cpu_gdt_table,%eax
16257 movl $stack_canary,%ecx
16258 +#ifdef CONFIG_SMP
16259 + addl $__per_cpu_load,%ecx
16260 +#endif
16261 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16262 shrl $16, %ecx
16263 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16264 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16265 1:
16266 -#endif
16267 movl $(__KERNEL_STACK_CANARY),%eax
16268 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16269 + movl $(__USER_DS),%eax
16270 +#else
16271 + xorl %eax,%eax
16272 +#endif
16273 movl %eax,%gs
16274
16275 xorl %eax,%eax # Clear LDT
16276 @@ -558,22 +639,22 @@ early_page_fault:
16277 jmp early_fault
16278
16279 early_fault:
16280 - cld
16281 #ifdef CONFIG_PRINTK
16282 + cmpl $1,%ss:early_recursion_flag
16283 + je hlt_loop
16284 + incl %ss:early_recursion_flag
16285 + cld
16286 pusha
16287 movl $(__KERNEL_DS),%eax
16288 movl %eax,%ds
16289 movl %eax,%es
16290 - cmpl $2,early_recursion_flag
16291 - je hlt_loop
16292 - incl early_recursion_flag
16293 movl %cr2,%eax
16294 pushl %eax
16295 pushl %edx /* trapno */
16296 pushl $fault_msg
16297 call printk
16298 +; call dump_stack
16299 #endif
16300 - call dump_stack
16301 hlt_loop:
16302 hlt
16303 jmp hlt_loop
16304 @@ -581,8 +662,11 @@ hlt_loop:
16305 /* This is the default interrupt "handler" :-) */
16306 ALIGN
16307 ignore_int:
16308 - cld
16309 #ifdef CONFIG_PRINTK
16310 + cmpl $2,%ss:early_recursion_flag
16311 + je hlt_loop
16312 + incl %ss:early_recursion_flag
16313 + cld
16314 pushl %eax
16315 pushl %ecx
16316 pushl %edx
16317 @@ -591,9 +675,6 @@ ignore_int:
16318 movl $(__KERNEL_DS),%eax
16319 movl %eax,%ds
16320 movl %eax,%es
16321 - cmpl $2,early_recursion_flag
16322 - je hlt_loop
16323 - incl early_recursion_flag
16324 pushl 16(%esp)
16325 pushl 24(%esp)
16326 pushl 32(%esp)
16327 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16328 /*
16329 * BSS section
16330 */
16331 -__PAGE_ALIGNED_BSS
16332 - .align PAGE_SIZE
16333 #ifdef CONFIG_X86_PAE
16334 +.section .initial_pg_pmd,"a",@progbits
16335 initial_pg_pmd:
16336 .fill 1024*KPMDS,4,0
16337 #else
16338 +.section .initial_page_table,"a",@progbits
16339 ENTRY(initial_page_table)
16340 .fill 1024,4,0
16341 #endif
16342 +.section .initial_pg_fixmap,"a",@progbits
16343 initial_pg_fixmap:
16344 .fill 1024,4,0
16345 +.section .empty_zero_page,"a",@progbits
16346 ENTRY(empty_zero_page)
16347 .fill 4096,1,0
16348 +.section .swapper_pg_dir,"a",@progbits
16349 ENTRY(swapper_pg_dir)
16350 +#ifdef CONFIG_X86_PAE
16351 + .fill 4,8,0
16352 +#else
16353 .fill 1024,4,0
16354 +#endif
16355 +
16356 +/*
16357 + * The IDT has to be page-aligned to simplify the Pentium
16358 + * F0 0F bug workaround.. We have a special link segment
16359 + * for this.
16360 + */
16361 +.section .idt,"a",@progbits
16362 +ENTRY(idt_table)
16363 + .fill 256,8,0
16364
16365 /*
16366 * This starts the data section.
16367 */
16368 #ifdef CONFIG_X86_PAE
16369 -__PAGE_ALIGNED_DATA
16370 - /* Page-aligned for the benefit of paravirt? */
16371 - .align PAGE_SIZE
16372 +.section .initial_page_table,"a",@progbits
16373 ENTRY(initial_page_table)
16374 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
16375 # if KPMDS == 3
16376 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
16377 # error "Kernel PMDs should be 1, 2 or 3"
16378 # endif
16379 .align PAGE_SIZE /* needs to be page-sized too */
16380 +
16381 +#ifdef CONFIG_PAX_PER_CPU_PGD
16382 +ENTRY(cpu_pgd)
16383 + .rept NR_CPUS
16384 + .fill 4,8,0
16385 + .endr
16386 +#endif
16387 +
16388 #endif
16389
16390 .data
16391 .balign 4
16392 ENTRY(stack_start)
16393 - .long init_thread_union+THREAD_SIZE
16394 + .long init_thread_union+THREAD_SIZE-8
16395
16396 +ready: .byte 0
16397 +
16398 +.section .rodata,"a",@progbits
16399 early_recursion_flag:
16400 .long 0
16401
16402 -ready: .byte 0
16403 -
16404 int_msg:
16405 .asciz "Unknown interrupt or fault at: %p %p %p\n"
16406
16407 @@ -707,7 +811,7 @@ fault_msg:
16408 .word 0 # 32 bit align gdt_desc.address
16409 boot_gdt_descr:
16410 .word __BOOT_DS+7
16411 - .long boot_gdt - __PAGE_OFFSET
16412 + .long pa(boot_gdt)
16413
16414 .word 0 # 32-bit align idt_desc.address
16415 idt_descr:
16416 @@ -718,7 +822,7 @@ idt_descr:
16417 .word 0 # 32 bit align gdt_desc.address
16418 ENTRY(early_gdt_descr)
16419 .word GDT_ENTRIES*8-1
16420 - .long gdt_page /* Overwritten for secondary CPUs */
16421 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
16422
16423 /*
16424 * The boot_gdt must mirror the equivalent in setup.S and is
16425 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
16426 .align L1_CACHE_BYTES
16427 ENTRY(boot_gdt)
16428 .fill GDT_ENTRY_BOOT_CS,8,0
16429 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
16430 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
16431 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
16432 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
16433 +
16434 + .align PAGE_SIZE_asm
16435 +ENTRY(cpu_gdt_table)
16436 + .rept NR_CPUS
16437 + .quad 0x0000000000000000 /* NULL descriptor */
16438 + .quad 0x0000000000000000 /* 0x0b reserved */
16439 + .quad 0x0000000000000000 /* 0x13 reserved */
16440 + .quad 0x0000000000000000 /* 0x1b reserved */
16441 +
16442 +#ifdef CONFIG_PAX_KERNEXEC
16443 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
16444 +#else
16445 + .quad 0x0000000000000000 /* 0x20 unused */
16446 +#endif
16447 +
16448 + .quad 0x0000000000000000 /* 0x28 unused */
16449 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
16450 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
16451 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
16452 + .quad 0x0000000000000000 /* 0x4b reserved */
16453 + .quad 0x0000000000000000 /* 0x53 reserved */
16454 + .quad 0x0000000000000000 /* 0x5b reserved */
16455 +
16456 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
16457 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
16458 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
16459 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
16460 +
16461 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
16462 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
16463 +
16464 + /*
16465 + * Segments used for calling PnP BIOS have byte granularity.
16466 + * The code segments and data segments have fixed 64k limits,
16467 + * the transfer segment sizes are set at run time.
16468 + */
16469 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
16470 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
16471 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
16472 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
16473 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
16474 +
16475 + /*
16476 + * The APM segments have byte granularity and their bases
16477 + * are set at run time. All have 64k limits.
16478 + */
16479 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
16480 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
16481 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
16482 +
16483 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
16484 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
16485 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
16486 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
16487 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
16488 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
16489 +
16490 + /* Be sure this is zeroed to avoid false validations in Xen */
16491 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
16492 + .endr
16493 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
16494 index e11e394..9aebc5d 100644
16495 --- a/arch/x86/kernel/head_64.S
16496 +++ b/arch/x86/kernel/head_64.S
16497 @@ -19,6 +19,8 @@
16498 #include <asm/cache.h>
16499 #include <asm/processor-flags.h>
16500 #include <asm/percpu.h>
16501 +#include <asm/cpufeature.h>
16502 +#include <asm/alternative-asm.h>
16503
16504 #ifdef CONFIG_PARAVIRT
16505 #include <asm/asm-offsets.h>
16506 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
16507 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
16508 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
16509 L3_START_KERNEL = pud_index(__START_KERNEL_map)
16510 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
16511 +L3_VMALLOC_START = pud_index(VMALLOC_START)
16512 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
16513 +L3_VMALLOC_END = pud_index(VMALLOC_END)
16514 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
16515 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
16516
16517 .text
16518 __HEAD
16519 @@ -85,35 +93,23 @@ startup_64:
16520 */
16521 addq %rbp, init_level4_pgt + 0(%rip)
16522 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
16523 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
16524 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
16525 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
16526 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
16527
16528 addq %rbp, level3_ident_pgt + 0(%rip)
16529 +#ifndef CONFIG_XEN
16530 + addq %rbp, level3_ident_pgt + 8(%rip)
16531 +#endif
16532
16533 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
16534 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
16535 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
16536 +
16537 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
16538 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
16539
16540 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
16541 -
16542 - /* Add an Identity mapping if I am above 1G */
16543 - leaq _text(%rip), %rdi
16544 - andq $PMD_PAGE_MASK, %rdi
16545 -
16546 - movq %rdi, %rax
16547 - shrq $PUD_SHIFT, %rax
16548 - andq $(PTRS_PER_PUD - 1), %rax
16549 - jz ident_complete
16550 -
16551 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
16552 - leaq level3_ident_pgt(%rip), %rbx
16553 - movq %rdx, 0(%rbx, %rax, 8)
16554 -
16555 - movq %rdi, %rax
16556 - shrq $PMD_SHIFT, %rax
16557 - andq $(PTRS_PER_PMD - 1), %rax
16558 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
16559 - leaq level2_spare_pgt(%rip), %rbx
16560 - movq %rdx, 0(%rbx, %rax, 8)
16561 -ident_complete:
16562 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
16563
16564 /*
16565 * Fixup the kernel text+data virtual addresses. Note that
16566 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
16567 * after the boot processor executes this code.
16568 */
16569
16570 - /* Enable PAE mode and PGE */
16571 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
16572 + /* Enable PAE mode and PSE/PGE */
16573 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16574 movq %rax, %cr4
16575
16576 /* Setup early boot stage 4 level pagetables. */
16577 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
16578 movl $MSR_EFER, %ecx
16579 rdmsr
16580 btsl $_EFER_SCE, %eax /* Enable System Call */
16581 - btl $20,%edi /* No Execute supported? */
16582 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
16583 jnc 1f
16584 btsl $_EFER_NX, %eax
16585 + leaq init_level4_pgt(%rip), %rdi
16586 +#ifndef CONFIG_EFI
16587 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
16588 +#endif
16589 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
16590 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
16591 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
16592 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
16593 1: wrmsr /* Make changes effective */
16594
16595 /* Setup cr0 */
16596 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
16597 * jump. In addition we need to ensure %cs is set so we make this
16598 * a far return.
16599 */
16600 + pax_set_fptr_mask
16601 movq initial_code(%rip),%rax
16602 pushq $0 # fake return address to stop unwinder
16603 pushq $__KERNEL_CS # set correct cs
16604 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
16605 bad_address:
16606 jmp bad_address
16607
16608 - .section ".init.text","ax"
16609 + __INIT
16610 #ifdef CONFIG_EARLY_PRINTK
16611 .globl early_idt_handlers
16612 early_idt_handlers:
16613 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
16614 #endif /* EARLY_PRINTK */
16615 1: hlt
16616 jmp 1b
16617 + .previous
16618
16619 #ifdef CONFIG_EARLY_PRINTK
16620 + __INITDATA
16621 early_recursion_flag:
16622 .long 0
16623 + .previous
16624
16625 + .section .rodata,"a",@progbits
16626 early_idt_msg:
16627 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
16628 early_idt_ripmsg:
16629 .asciz "RIP %s\n"
16630 + .previous
16631 #endif /* CONFIG_EARLY_PRINTK */
16632 - .previous
16633
16634 + .section .rodata,"a",@progbits
16635 #define NEXT_PAGE(name) \
16636 .balign PAGE_SIZE; \
16637 ENTRY(name)
16638 @@ -338,7 +348,6 @@ ENTRY(name)
16639 i = i + 1 ; \
16640 .endr
16641
16642 - .data
16643 /*
16644 * This default setting generates an ident mapping at address 0x100000
16645 * and a mapping for the kernel that precisely maps virtual address
16646 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
16647 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16648 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
16649 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16650 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
16651 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
16652 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
16653 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
16654 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
16655 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16656 .org init_level4_pgt + L4_START_KERNEL*8, 0
16657 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
16658 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
16659
16660 +#ifdef CONFIG_PAX_PER_CPU_PGD
16661 +NEXT_PAGE(cpu_pgd)
16662 + .rept NR_CPUS
16663 + .fill 512,8,0
16664 + .endr
16665 +#endif
16666 +
16667 NEXT_PAGE(level3_ident_pgt)
16668 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16669 +#ifdef CONFIG_XEN
16670 .fill 511,8,0
16671 +#else
16672 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
16673 + .fill 510,8,0
16674 +#endif
16675 +
16676 +NEXT_PAGE(level3_vmalloc_start_pgt)
16677 + .fill 512,8,0
16678 +
16679 +NEXT_PAGE(level3_vmalloc_end_pgt)
16680 + .fill 512,8,0
16681 +
16682 +NEXT_PAGE(level3_vmemmap_pgt)
16683 + .fill L3_VMEMMAP_START,8,0
16684 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16685
16686 NEXT_PAGE(level3_kernel_pgt)
16687 .fill L3_START_KERNEL,8,0
16688 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
16689 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
16690 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16691
16692 +NEXT_PAGE(level2_vmemmap_pgt)
16693 + .fill 512,8,0
16694 +
16695 NEXT_PAGE(level2_fixmap_pgt)
16696 - .fill 506,8,0
16697 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16698 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
16699 - .fill 5,8,0
16700 + .fill 507,8,0
16701 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
16702 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
16703 + .fill 4,8,0
16704
16705 -NEXT_PAGE(level1_fixmap_pgt)
16706 +NEXT_PAGE(level1_vsyscall_pgt)
16707 .fill 512,8,0
16708
16709 -NEXT_PAGE(level2_ident_pgt)
16710 - /* Since I easily can, map the first 1G.
16711 + /* Since I easily can, map the first 2G.
16712 * Don't set NX because code runs from these pages.
16713 */
16714 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
16715 +NEXT_PAGE(level2_ident_pgt)
16716 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
16717
16718 NEXT_PAGE(level2_kernel_pgt)
16719 /*
16720 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
16721 * If you want to increase this then increase MODULES_VADDR
16722 * too.)
16723 */
16724 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
16725 - KERNEL_IMAGE_SIZE/PMD_SIZE)
16726 -
16727 -NEXT_PAGE(level2_spare_pgt)
16728 - .fill 512, 8, 0
16729 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
16730
16731 #undef PMDS
16732 #undef NEXT_PAGE
16733
16734 - .data
16735 + .align PAGE_SIZE
16736 +ENTRY(cpu_gdt_table)
16737 + .rept NR_CPUS
16738 + .quad 0x0000000000000000 /* NULL descriptor */
16739 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
16740 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
16741 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
16742 + .quad 0x00cffb000000ffff /* __USER32_CS */
16743 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
16744 + .quad 0x00affb000000ffff /* __USER_CS */
16745 +
16746 +#ifdef CONFIG_PAX_KERNEXEC
16747 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
16748 +#else
16749 + .quad 0x0 /* unused */
16750 +#endif
16751 +
16752 + .quad 0,0 /* TSS */
16753 + .quad 0,0 /* LDT */
16754 + .quad 0,0,0 /* three TLS descriptors */
16755 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
16756 + /* asm/segment.h:GDT_ENTRIES must match this */
16757 +
16758 + /* zero the remaining page */
16759 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
16760 + .endr
16761 +
16762 .align 16
16763 .globl early_gdt_descr
16764 early_gdt_descr:
16765 .word GDT_ENTRIES*8-1
16766 early_gdt_descr_base:
16767 - .quad INIT_PER_CPU_VAR(gdt_page)
16768 + .quad cpu_gdt_table
16769
16770 ENTRY(phys_base)
16771 /* This must match the first entry in level2_kernel_pgt */
16772 .quad 0x0000000000000000
16773
16774 #include "../../x86/xen/xen-head.S"
16775 -
16776 - .section .bss, "aw", @nobits
16777 +
16778 + .section .rodata,"a",@progbits
16779 .align L1_CACHE_BYTES
16780 ENTRY(idt_table)
16781 - .skip IDT_ENTRIES * 16
16782 + .fill 512,8,0
16783
16784 __PAGE_ALIGNED_BSS
16785 .align PAGE_SIZE
16786 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
16787 index 9c3bd4a..e1d9b35 100644
16788 --- a/arch/x86/kernel/i386_ksyms_32.c
16789 +++ b/arch/x86/kernel/i386_ksyms_32.c
16790 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
16791 EXPORT_SYMBOL(cmpxchg8b_emu);
16792 #endif
16793
16794 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
16795 +
16796 /* Networking helper routines. */
16797 EXPORT_SYMBOL(csum_partial_copy_generic);
16798 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
16799 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
16800
16801 EXPORT_SYMBOL(__get_user_1);
16802 EXPORT_SYMBOL(__get_user_2);
16803 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
16804
16805 EXPORT_SYMBOL(csum_partial);
16806 EXPORT_SYMBOL(empty_zero_page);
16807 +
16808 +#ifdef CONFIG_PAX_KERNEXEC
16809 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
16810 +#endif
16811 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
16812 index 6104852..6114160 100644
16813 --- a/arch/x86/kernel/i8259.c
16814 +++ b/arch/x86/kernel/i8259.c
16815 @@ -210,7 +210,7 @@ spurious_8259A_irq:
16816 "spurious 8259A interrupt: IRQ%d.\n", irq);
16817 spurious_irq_mask |= irqmask;
16818 }
16819 - atomic_inc(&irq_err_count);
16820 + atomic_inc_unchecked(&irq_err_count);
16821 /*
16822 * Theoretically we do not have to handle this IRQ,
16823 * but in Linux this does not cause problems and is
16824 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
16825 index 43e9ccf..44ccf6f 100644
16826 --- a/arch/x86/kernel/init_task.c
16827 +++ b/arch/x86/kernel/init_task.c
16828 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
16829 * way process stacks are handled. This is done by having a special
16830 * "init_task" linker map entry..
16831 */
16832 -union thread_union init_thread_union __init_task_data =
16833 - { INIT_THREAD_INFO(init_task) };
16834 +union thread_union init_thread_union __init_task_data;
16835
16836 /*
16837 * Initial task structure.
16838 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
16839 * section. Since TSS's are completely CPU-local, we want them
16840 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
16841 */
16842 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
16843 -
16844 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
16845 +EXPORT_SYMBOL(init_tss);
16846 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
16847 index 8c96897..be66bfa 100644
16848 --- a/arch/x86/kernel/ioport.c
16849 +++ b/arch/x86/kernel/ioport.c
16850 @@ -6,6 +6,7 @@
16851 #include <linux/sched.h>
16852 #include <linux/kernel.h>
16853 #include <linux/capability.h>
16854 +#include <linux/security.h>
16855 #include <linux/errno.h>
16856 #include <linux/types.h>
16857 #include <linux/ioport.h>
16858 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16859
16860 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
16861 return -EINVAL;
16862 +#ifdef CONFIG_GRKERNSEC_IO
16863 + if (turn_on && grsec_disable_privio) {
16864 + gr_handle_ioperm();
16865 + return -EPERM;
16866 + }
16867 +#endif
16868 if (turn_on && !capable(CAP_SYS_RAWIO))
16869 return -EPERM;
16870
16871 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
16872 * because the ->io_bitmap_max value must match the bitmap
16873 * contents:
16874 */
16875 - tss = &per_cpu(init_tss, get_cpu());
16876 + tss = init_tss + get_cpu();
16877
16878 if (turn_on)
16879 bitmap_clear(t->io_bitmap_ptr, from, num);
16880 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
16881 return -EINVAL;
16882 /* Trying to gain more privileges? */
16883 if (level > old) {
16884 +#ifdef CONFIG_GRKERNSEC_IO
16885 + if (grsec_disable_privio) {
16886 + gr_handle_iopl();
16887 + return -EPERM;
16888 + }
16889 +#endif
16890 if (!capable(CAP_SYS_RAWIO))
16891 return -EPERM;
16892 }
16893 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
16894 index 429e0c9..17b3ece 100644
16895 --- a/arch/x86/kernel/irq.c
16896 +++ b/arch/x86/kernel/irq.c
16897 @@ -18,7 +18,7 @@
16898 #include <asm/mce.h>
16899 #include <asm/hw_irq.h>
16900
16901 -atomic_t irq_err_count;
16902 +atomic_unchecked_t irq_err_count;
16903
16904 /* Function pointer for generic interrupt vector handling */
16905 void (*x86_platform_ipi_callback)(void) = NULL;
16906 @@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
16907 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
16908 seq_printf(p, " Machine check polls\n");
16909 #endif
16910 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
16911 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
16912 #if defined(CONFIG_X86_IO_APIC)
16913 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
16914 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
16915 #endif
16916 return 0;
16917 }
16918 @@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
16919
16920 u64 arch_irq_stat(void)
16921 {
16922 - u64 sum = atomic_read(&irq_err_count);
16923 + u64 sum = atomic_read_unchecked(&irq_err_count);
16924
16925 #ifdef CONFIG_X86_IO_APIC
16926 - sum += atomic_read(&irq_mis_count);
16927 + sum += atomic_read_unchecked(&irq_mis_count);
16928 #endif
16929 return sum;
16930 }
16931 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
16932 index 7209070..cbcd71a 100644
16933 --- a/arch/x86/kernel/irq_32.c
16934 +++ b/arch/x86/kernel/irq_32.c
16935 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
16936 __asm__ __volatile__("andl %%esp,%0" :
16937 "=r" (sp) : "0" (THREAD_SIZE - 1));
16938
16939 - return sp < (sizeof(struct thread_info) + STACK_WARN);
16940 + return sp < STACK_WARN;
16941 }
16942
16943 static void print_stack_overflow(void)
16944 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
16945 * per-CPU IRQ handling contexts (thread information and stack)
16946 */
16947 union irq_ctx {
16948 - struct thread_info tinfo;
16949 - u32 stack[THREAD_SIZE/sizeof(u32)];
16950 + unsigned long previous_esp;
16951 + u32 stack[THREAD_SIZE/sizeof(u32)];
16952 } __attribute__((aligned(THREAD_SIZE)));
16953
16954 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
16955 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
16956 static inline int
16957 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16958 {
16959 - union irq_ctx *curctx, *irqctx;
16960 + union irq_ctx *irqctx;
16961 u32 *isp, arg1, arg2;
16962
16963 - curctx = (union irq_ctx *) current_thread_info();
16964 irqctx = __this_cpu_read(hardirq_ctx);
16965
16966 /*
16967 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16968 * handler) we can't do that and just have to keep using the
16969 * current stack (which is the irq stack already after all)
16970 */
16971 - if (unlikely(curctx == irqctx))
16972 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
16973 return 0;
16974
16975 /* build the stack frame on the IRQ stack */
16976 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
16977 - irqctx->tinfo.task = curctx->tinfo.task;
16978 - irqctx->tinfo.previous_esp = current_stack_pointer;
16979 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
16980 + irqctx->previous_esp = current_stack_pointer;
16981
16982 - /*
16983 - * Copy the softirq bits in preempt_count so that the
16984 - * softirq checks work in the hardirq context.
16985 - */
16986 - irqctx->tinfo.preempt_count =
16987 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
16988 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
16989 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16990 + __set_fs(MAKE_MM_SEG(0));
16991 +#endif
16992
16993 if (unlikely(overflow))
16994 call_on_stack(print_stack_overflow, isp);
16995 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
16996 : "0" (irq), "1" (desc), "2" (isp),
16997 "D" (desc->handle_irq)
16998 : "memory", "cc", "ecx");
16999 +
17000 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17001 + __set_fs(current_thread_info()->addr_limit);
17002 +#endif
17003 +
17004 return 1;
17005 }
17006
17007 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17008 */
17009 void __cpuinit irq_ctx_init(int cpu)
17010 {
17011 - union irq_ctx *irqctx;
17012 -
17013 if (per_cpu(hardirq_ctx, cpu))
17014 return;
17015
17016 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17017 - THREAD_FLAGS,
17018 - THREAD_ORDER));
17019 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17020 - irqctx->tinfo.cpu = cpu;
17021 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17022 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17023 -
17024 - per_cpu(hardirq_ctx, cpu) = irqctx;
17025 -
17026 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17027 - THREAD_FLAGS,
17028 - THREAD_ORDER));
17029 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17030 - irqctx->tinfo.cpu = cpu;
17031 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17032 -
17033 - per_cpu(softirq_ctx, cpu) = irqctx;
17034 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17035 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17036
17037 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17038 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17039 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
17040 asmlinkage void do_softirq(void)
17041 {
17042 unsigned long flags;
17043 - struct thread_info *curctx;
17044 union irq_ctx *irqctx;
17045 u32 *isp;
17046
17047 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
17048 local_irq_save(flags);
17049
17050 if (local_softirq_pending()) {
17051 - curctx = current_thread_info();
17052 irqctx = __this_cpu_read(softirq_ctx);
17053 - irqctx->tinfo.task = curctx->task;
17054 - irqctx->tinfo.previous_esp = current_stack_pointer;
17055 + irqctx->previous_esp = current_stack_pointer;
17056
17057 /* build the stack frame on the softirq stack */
17058 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17059 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17060 +
17061 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17062 + __set_fs(MAKE_MM_SEG(0));
17063 +#endif
17064
17065 call_on_stack(__do_softirq, isp);
17066 +
17067 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17068 + __set_fs(current_thread_info()->addr_limit);
17069 +#endif
17070 +
17071 /*
17072 * Shouldn't happen, we returned above if in_interrupt():
17073 */
17074 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17075 index 69bca46..0bac999 100644
17076 --- a/arch/x86/kernel/irq_64.c
17077 +++ b/arch/x86/kernel/irq_64.c
17078 @@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17079 #ifdef CONFIG_DEBUG_STACKOVERFLOW
17080 u64 curbase = (u64)task_stack_page(current);
17081
17082 - if (user_mode_vm(regs))
17083 + if (user_mode(regs))
17084 return;
17085
17086 WARN_ONCE(regs->sp >= curbase &&
17087 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17088 index faba577..93b9e71 100644
17089 --- a/arch/x86/kernel/kgdb.c
17090 +++ b/arch/x86/kernel/kgdb.c
17091 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17092 #ifdef CONFIG_X86_32
17093 switch (regno) {
17094 case GDB_SS:
17095 - if (!user_mode_vm(regs))
17096 + if (!user_mode(regs))
17097 *(unsigned long *)mem = __KERNEL_DS;
17098 break;
17099 case GDB_SP:
17100 - if (!user_mode_vm(regs))
17101 + if (!user_mode(regs))
17102 *(unsigned long *)mem = kernel_stack_pointer(regs);
17103 break;
17104 case GDB_GS:
17105 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17106 case 'k':
17107 /* clear the trace bit */
17108 linux_regs->flags &= ~X86_EFLAGS_TF;
17109 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17110 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17111
17112 /* set the trace bit if we're stepping */
17113 if (remcomInBuffer[0] == 's') {
17114 linux_regs->flags |= X86_EFLAGS_TF;
17115 - atomic_set(&kgdb_cpu_doing_single_step,
17116 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17117 raw_smp_processor_id());
17118 }
17119
17120 @@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17121
17122 switch (cmd) {
17123 case DIE_DEBUG:
17124 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17125 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17126 if (user_mode(regs))
17127 return single_step_cont(regs, args);
17128 break;
17129 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17130 index 7da647d..56fe348 100644
17131 --- a/arch/x86/kernel/kprobes.c
17132 +++ b/arch/x86/kernel/kprobes.c
17133 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17134 } __attribute__((packed)) *insn;
17135
17136 insn = (struct __arch_relative_insn *)from;
17137 +
17138 + pax_open_kernel();
17139 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17140 insn->op = op;
17141 + pax_close_kernel();
17142 }
17143
17144 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17145 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17146 kprobe_opcode_t opcode;
17147 kprobe_opcode_t *orig_opcodes = opcodes;
17148
17149 - if (search_exception_tables((unsigned long)opcodes))
17150 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17151 return 0; /* Page fault may occur on this address. */
17152
17153 retry:
17154 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17155 }
17156 }
17157 insn_get_length(&insn);
17158 + pax_open_kernel();
17159 memcpy(dest, insn.kaddr, insn.length);
17160 + pax_close_kernel();
17161
17162 #ifdef CONFIG_X86_64
17163 if (insn_rip_relative(&insn)) {
17164 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17165 (u8 *) dest;
17166 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17167 disp = (u8 *) dest + insn_offset_displacement(&insn);
17168 + pax_open_kernel();
17169 *(s32 *) disp = (s32) newdisp;
17170 + pax_close_kernel();
17171 }
17172 #endif
17173 return insn.length;
17174 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17175 */
17176 __copy_instruction(p->ainsn.insn, p->addr, 0);
17177
17178 - if (can_boost(p->addr))
17179 + if (can_boost(ktla_ktva(p->addr)))
17180 p->ainsn.boostable = 0;
17181 else
17182 p->ainsn.boostable = -1;
17183
17184 - p->opcode = *p->addr;
17185 + p->opcode = *(ktla_ktva(p->addr));
17186 }
17187
17188 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17189 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17190 * nor set current_kprobe, because it doesn't use single
17191 * stepping.
17192 */
17193 - regs->ip = (unsigned long)p->ainsn.insn;
17194 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17195 preempt_enable_no_resched();
17196 return;
17197 }
17198 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17199 if (p->opcode == BREAKPOINT_INSTRUCTION)
17200 regs->ip = (unsigned long)p->addr;
17201 else
17202 - regs->ip = (unsigned long)p->ainsn.insn;
17203 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17204 }
17205
17206 /*
17207 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17208 setup_singlestep(p, regs, kcb, 0);
17209 return 1;
17210 }
17211 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17212 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17213 /*
17214 * The breakpoint instruction was removed right
17215 * after we hit it. Another cpu has removed
17216 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17217 " movq %rax, 152(%rsp)\n"
17218 RESTORE_REGS_STRING
17219 " popfq\n"
17220 +#ifdef KERNEXEC_PLUGIN
17221 + " btsq $63,(%rsp)\n"
17222 +#endif
17223 #else
17224 " pushf\n"
17225 SAVE_REGS_STRING
17226 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17227 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17228 {
17229 unsigned long *tos = stack_addr(regs);
17230 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17231 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17232 unsigned long orig_ip = (unsigned long)p->addr;
17233 kprobe_opcode_t *insn = p->ainsn.insn;
17234
17235 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17236 struct die_args *args = data;
17237 int ret = NOTIFY_DONE;
17238
17239 - if (args->regs && user_mode_vm(args->regs))
17240 + if (args->regs && user_mode(args->regs))
17241 return ret;
17242
17243 switch (val) {
17244 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17245 * Verify if the address gap is in 2GB range, because this uses
17246 * a relative jump.
17247 */
17248 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17249 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17250 if (abs(rel) > 0x7fffffff)
17251 return -ERANGE;
17252
17253 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17254 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17255
17256 /* Set probe function call */
17257 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17258 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17259
17260 /* Set returning jmp instruction at the tail of out-of-line buffer */
17261 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17262 - (u8 *)op->kp.addr + op->optinsn.size);
17263 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17264
17265 flush_icache_range((unsigned long) buf,
17266 (unsigned long) buf + TMPL_END_IDX +
17267 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17268 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17269
17270 /* Backup instructions which will be replaced by jump address */
17271 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17272 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17273 RELATIVE_ADDR_SIZE);
17274
17275 insn_buf[0] = RELATIVEJUMP_OPCODE;
17276 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
17277 index a9c2116..a52d4fc 100644
17278 --- a/arch/x86/kernel/kvm.c
17279 +++ b/arch/x86/kernel/kvm.c
17280 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
17281 pv_mmu_ops.set_pud = kvm_set_pud;
17282 #if PAGETABLE_LEVELS == 4
17283 pv_mmu_ops.set_pgd = kvm_set_pgd;
17284 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
17285 #endif
17286 #endif
17287 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
17288 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17289 index ea69726..604d066 100644
17290 --- a/arch/x86/kernel/ldt.c
17291 +++ b/arch/x86/kernel/ldt.c
17292 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17293 if (reload) {
17294 #ifdef CONFIG_SMP
17295 preempt_disable();
17296 - load_LDT(pc);
17297 + load_LDT_nolock(pc);
17298 if (!cpumask_equal(mm_cpumask(current->mm),
17299 cpumask_of(smp_processor_id())))
17300 smp_call_function(flush_ldt, current->mm, 1);
17301 preempt_enable();
17302 #else
17303 - load_LDT(pc);
17304 + load_LDT_nolock(pc);
17305 #endif
17306 }
17307 if (oldsize) {
17308 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17309 return err;
17310
17311 for (i = 0; i < old->size; i++)
17312 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17313 + write_ldt_entry(new->ldt, i, old->ldt + i);
17314 return 0;
17315 }
17316
17317 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17318 retval = copy_ldt(&mm->context, &old_mm->context);
17319 mutex_unlock(&old_mm->context.lock);
17320 }
17321 +
17322 + if (tsk == current) {
17323 + mm->context.vdso = 0;
17324 +
17325 +#ifdef CONFIG_X86_32
17326 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17327 + mm->context.user_cs_base = 0UL;
17328 + mm->context.user_cs_limit = ~0UL;
17329 +
17330 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17331 + cpus_clear(mm->context.cpu_user_cs_mask);
17332 +#endif
17333 +
17334 +#endif
17335 +#endif
17336 +
17337 + }
17338 +
17339 return retval;
17340 }
17341
17342 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17343 }
17344 }
17345
17346 +#ifdef CONFIG_PAX_SEGMEXEC
17347 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17348 + error = -EINVAL;
17349 + goto out_unlock;
17350 + }
17351 +#endif
17352 +
17353 fill_ldt(&ldt, &ldt_info);
17354 if (oldmode)
17355 ldt.avl = 0;
17356 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
17357 index a3fa43b..8966f4c 100644
17358 --- a/arch/x86/kernel/machine_kexec_32.c
17359 +++ b/arch/x86/kernel/machine_kexec_32.c
17360 @@ -27,7 +27,7 @@
17361 #include <asm/cacheflush.h>
17362 #include <asm/debugreg.h>
17363
17364 -static void set_idt(void *newidt, __u16 limit)
17365 +static void set_idt(struct desc_struct *newidt, __u16 limit)
17366 {
17367 struct desc_ptr curidt;
17368
17369 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
17370 }
17371
17372
17373 -static void set_gdt(void *newgdt, __u16 limit)
17374 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
17375 {
17376 struct desc_ptr curgdt;
17377
17378 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
17379 }
17380
17381 control_page = page_address(image->control_code_page);
17382 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
17383 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
17384
17385 relocate_kernel_ptr = control_page;
17386 page_list[PA_CONTROL_PAGE] = __pa(control_page);
17387 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
17388 index 3ca42d0..7cff8cc 100644
17389 --- a/arch/x86/kernel/microcode_intel.c
17390 +++ b/arch/x86/kernel/microcode_intel.c
17391 @@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
17392
17393 static int get_ucode_user(void *to, const void *from, size_t n)
17394 {
17395 - return copy_from_user(to, from, n);
17396 + return copy_from_user(to, (const void __force_user *)from, n);
17397 }
17398
17399 static enum ucode_state
17400 request_microcode_user(int cpu, const void __user *buf, size_t size)
17401 {
17402 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
17403 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
17404 }
17405
17406 static void microcode_fini_cpu(int cpu)
17407 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
17408 index 925179f..267ac7a 100644
17409 --- a/arch/x86/kernel/module.c
17410 +++ b/arch/x86/kernel/module.c
17411 @@ -36,15 +36,60 @@
17412 #define DEBUGP(fmt...)
17413 #endif
17414
17415 -void *module_alloc(unsigned long size)
17416 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
17417 {
17418 - if (PAGE_ALIGN(size) > MODULES_LEN)
17419 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
17420 return NULL;
17421 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
17422 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
17423 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
17424 -1, __builtin_return_address(0));
17425 }
17426
17427 +void *module_alloc(unsigned long size)
17428 +{
17429 +
17430 +#ifdef CONFIG_PAX_KERNEXEC
17431 + return __module_alloc(size, PAGE_KERNEL);
17432 +#else
17433 + return __module_alloc(size, PAGE_KERNEL_EXEC);
17434 +#endif
17435 +
17436 +}
17437 +
17438 +#ifdef CONFIG_PAX_KERNEXEC
17439 +#ifdef CONFIG_X86_32
17440 +void *module_alloc_exec(unsigned long size)
17441 +{
17442 + struct vm_struct *area;
17443 +
17444 + if (size == 0)
17445 + return NULL;
17446 +
17447 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
17448 + return area ? area->addr : NULL;
17449 +}
17450 +EXPORT_SYMBOL(module_alloc_exec);
17451 +
17452 +void module_free_exec(struct module *mod, void *module_region)
17453 +{
17454 + vunmap(module_region);
17455 +}
17456 +EXPORT_SYMBOL(module_free_exec);
17457 +#else
17458 +void module_free_exec(struct module *mod, void *module_region)
17459 +{
17460 + module_free(mod, module_region);
17461 +}
17462 +EXPORT_SYMBOL(module_free_exec);
17463 +
17464 +void *module_alloc_exec(unsigned long size)
17465 +{
17466 + return __module_alloc(size, PAGE_KERNEL_RX);
17467 +}
17468 +EXPORT_SYMBOL(module_alloc_exec);
17469 +#endif
17470 +#endif
17471 +
17472 #ifdef CONFIG_X86_32
17473 int apply_relocate(Elf32_Shdr *sechdrs,
17474 const char *strtab,
17475 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17476 unsigned int i;
17477 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
17478 Elf32_Sym *sym;
17479 - uint32_t *location;
17480 + uint32_t *plocation, location;
17481
17482 DEBUGP("Applying relocate section %u to %u\n", relsec,
17483 sechdrs[relsec].sh_info);
17484 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
17485 /* This is where to make the change */
17486 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
17487 - + rel[i].r_offset;
17488 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
17489 + location = (uint32_t)plocation;
17490 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
17491 + plocation = ktla_ktva((void *)plocation);
17492 /* This is the symbol it is referring to. Note that all
17493 undefined symbols have been resolved. */
17494 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
17495 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17496 switch (ELF32_R_TYPE(rel[i].r_info)) {
17497 case R_386_32:
17498 /* We add the value into the location given */
17499 - *location += sym->st_value;
17500 + pax_open_kernel();
17501 + *plocation += sym->st_value;
17502 + pax_close_kernel();
17503 break;
17504 case R_386_PC32:
17505 /* Add the value, subtract its postition */
17506 - *location += sym->st_value - (uint32_t)location;
17507 + pax_open_kernel();
17508 + *plocation += sym->st_value - location;
17509 + pax_close_kernel();
17510 break;
17511 default:
17512 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
17513 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
17514 case R_X86_64_NONE:
17515 break;
17516 case R_X86_64_64:
17517 + pax_open_kernel();
17518 *(u64 *)loc = val;
17519 + pax_close_kernel();
17520 break;
17521 case R_X86_64_32:
17522 + pax_open_kernel();
17523 *(u32 *)loc = val;
17524 + pax_close_kernel();
17525 if (val != *(u32 *)loc)
17526 goto overflow;
17527 break;
17528 case R_X86_64_32S:
17529 + pax_open_kernel();
17530 *(s32 *)loc = val;
17531 + pax_close_kernel();
17532 if ((s64)val != *(s32 *)loc)
17533 goto overflow;
17534 break;
17535 case R_X86_64_PC32:
17536 val -= (u64)loc;
17537 + pax_open_kernel();
17538 *(u32 *)loc = val;
17539 + pax_close_kernel();
17540 +
17541 #if 0
17542 if ((s64)val != *(s32 *)loc)
17543 goto overflow;
17544 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
17545 index e88f37b..1353db6 100644
17546 --- a/arch/x86/kernel/nmi.c
17547 +++ b/arch/x86/kernel/nmi.c
17548 @@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
17549 dotraplinkage notrace __kprobes void
17550 do_nmi(struct pt_regs *regs, long error_code)
17551 {
17552 +
17553 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17554 + if (!user_mode(regs)) {
17555 + unsigned long cs = regs->cs & 0xFFFF;
17556 + unsigned long ip = ktva_ktla(regs->ip);
17557 +
17558 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17559 + regs->ip = ip;
17560 + }
17561 +#endif
17562 +
17563 nmi_enter();
17564
17565 inc_irq_stat(__nmi_count);
17566 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
17567 index 676b8c7..870ba04 100644
17568 --- a/arch/x86/kernel/paravirt-spinlocks.c
17569 +++ b/arch/x86/kernel/paravirt-spinlocks.c
17570 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
17571 arch_spin_lock(lock);
17572 }
17573
17574 -struct pv_lock_ops pv_lock_ops = {
17575 +struct pv_lock_ops pv_lock_ops __read_only = {
17576 #ifdef CONFIG_SMP
17577 .spin_is_locked = __ticket_spin_is_locked,
17578 .spin_is_contended = __ticket_spin_is_contended,
17579 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
17580 index d90272e..6bb013b 100644
17581 --- a/arch/x86/kernel/paravirt.c
17582 +++ b/arch/x86/kernel/paravirt.c
17583 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
17584 {
17585 return x;
17586 }
17587 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17588 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
17589 +#endif
17590
17591 void __init default_banner(void)
17592 {
17593 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
17594 if (opfunc == NULL)
17595 /* If there's no function, patch it with a ud2a (BUG) */
17596 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
17597 - else if (opfunc == _paravirt_nop)
17598 + else if (opfunc == (void *)_paravirt_nop)
17599 /* If the operation is a nop, then nop the callsite */
17600 ret = paravirt_patch_nop();
17601
17602 /* identity functions just return their single argument */
17603 - else if (opfunc == _paravirt_ident_32)
17604 + else if (opfunc == (void *)_paravirt_ident_32)
17605 ret = paravirt_patch_ident_32(insnbuf, len);
17606 - else if (opfunc == _paravirt_ident_64)
17607 + else if (opfunc == (void *)_paravirt_ident_64)
17608 ret = paravirt_patch_ident_64(insnbuf, len);
17609 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17610 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
17611 + ret = paravirt_patch_ident_64(insnbuf, len);
17612 +#endif
17613
17614 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
17615 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
17616 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
17617 if (insn_len > len || start == NULL)
17618 insn_len = len;
17619 else
17620 - memcpy(insnbuf, start, insn_len);
17621 + memcpy(insnbuf, ktla_ktva(start), insn_len);
17622
17623 return insn_len;
17624 }
17625 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
17626 preempt_enable();
17627 }
17628
17629 -struct pv_info pv_info = {
17630 +struct pv_info pv_info __read_only = {
17631 .name = "bare hardware",
17632 .paravirt_enabled = 0,
17633 .kernel_rpl = 0,
17634 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
17635 #endif
17636 };
17637
17638 -struct pv_init_ops pv_init_ops = {
17639 +struct pv_init_ops pv_init_ops __read_only = {
17640 .patch = native_patch,
17641 };
17642
17643 -struct pv_time_ops pv_time_ops = {
17644 +struct pv_time_ops pv_time_ops __read_only = {
17645 .sched_clock = native_sched_clock,
17646 .steal_clock = native_steal_clock,
17647 };
17648
17649 -struct pv_irq_ops pv_irq_ops = {
17650 +struct pv_irq_ops pv_irq_ops __read_only = {
17651 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
17652 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
17653 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
17654 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
17655 #endif
17656 };
17657
17658 -struct pv_cpu_ops pv_cpu_ops = {
17659 +struct pv_cpu_ops pv_cpu_ops __read_only = {
17660 .cpuid = native_cpuid,
17661 .get_debugreg = native_get_debugreg,
17662 .set_debugreg = native_set_debugreg,
17663 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
17664 .end_context_switch = paravirt_nop,
17665 };
17666
17667 -struct pv_apic_ops pv_apic_ops = {
17668 +struct pv_apic_ops pv_apic_ops __read_only = {
17669 #ifdef CONFIG_X86_LOCAL_APIC
17670 .startup_ipi_hook = paravirt_nop,
17671 #endif
17672 };
17673
17674 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
17675 +#ifdef CONFIG_X86_32
17676 +#ifdef CONFIG_X86_PAE
17677 +/* 64-bit pagetable entries */
17678 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
17679 +#else
17680 /* 32-bit pagetable entries */
17681 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
17682 +#endif
17683 #else
17684 /* 64-bit pagetable entries */
17685 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
17686 #endif
17687
17688 -struct pv_mmu_ops pv_mmu_ops = {
17689 +struct pv_mmu_ops pv_mmu_ops __read_only = {
17690
17691 .read_cr2 = native_read_cr2,
17692 .write_cr2 = native_write_cr2,
17693 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
17694 .make_pud = PTE_IDENT,
17695
17696 .set_pgd = native_set_pgd,
17697 + .set_pgd_batched = native_set_pgd_batched,
17698 #endif
17699 #endif /* PAGETABLE_LEVELS >= 3 */
17700
17701 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
17702 },
17703
17704 .set_fixmap = native_set_fixmap,
17705 +
17706 +#ifdef CONFIG_PAX_KERNEXEC
17707 + .pax_open_kernel = native_pax_open_kernel,
17708 + .pax_close_kernel = native_pax_close_kernel,
17709 +#endif
17710 +
17711 };
17712
17713 EXPORT_SYMBOL_GPL(pv_time_ops);
17714 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
17715 index 35ccf75..7a15747 100644
17716 --- a/arch/x86/kernel/pci-iommu_table.c
17717 +++ b/arch/x86/kernel/pci-iommu_table.c
17718 @@ -2,7 +2,7 @@
17719 #include <asm/iommu_table.h>
17720 #include <linux/string.h>
17721 #include <linux/kallsyms.h>
17722 -
17723 +#include <linux/sched.h>
17724
17725 #define DEBUG 1
17726
17727 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
17728 index ee5d4fb..426649b 100644
17729 --- a/arch/x86/kernel/process.c
17730 +++ b/arch/x86/kernel/process.c
17731 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
17732
17733 void free_thread_info(struct thread_info *ti)
17734 {
17735 - free_thread_xstate(ti->task);
17736 free_pages((unsigned long)ti, THREAD_ORDER);
17737 }
17738
17739 +static struct kmem_cache *task_struct_cachep;
17740 +
17741 void arch_task_cache_init(void)
17742 {
17743 - task_xstate_cachep =
17744 - kmem_cache_create("task_xstate", xstate_size,
17745 + /* create a slab on which task_structs can be allocated */
17746 + task_struct_cachep =
17747 + kmem_cache_create("task_struct", sizeof(struct task_struct),
17748 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
17749 +
17750 + task_xstate_cachep =
17751 + kmem_cache_create("task_xstate", xstate_size,
17752 __alignof__(union thread_xstate),
17753 - SLAB_PANIC | SLAB_NOTRACK, NULL);
17754 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
17755 +}
17756 +
17757 +struct task_struct *alloc_task_struct_node(int node)
17758 +{
17759 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
17760 +}
17761 +
17762 +void free_task_struct(struct task_struct *task)
17763 +{
17764 + free_thread_xstate(task);
17765 + kmem_cache_free(task_struct_cachep, task);
17766 }
17767
17768 /*
17769 @@ -70,7 +87,7 @@ void exit_thread(void)
17770 unsigned long *bp = t->io_bitmap_ptr;
17771
17772 if (bp) {
17773 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
17774 + struct tss_struct *tss = init_tss + get_cpu();
17775
17776 t->io_bitmap_ptr = NULL;
17777 clear_thread_flag(TIF_IO_BITMAP);
17778 @@ -106,7 +123,7 @@ void show_regs_common(void)
17779
17780 printk(KERN_CONT "\n");
17781 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
17782 - current->pid, current->comm, print_tainted(),
17783 + task_pid_nr(current), current->comm, print_tainted(),
17784 init_utsname()->release,
17785 (int)strcspn(init_utsname()->version, " "),
17786 init_utsname()->version);
17787 @@ -120,6 +137,9 @@ void flush_thread(void)
17788 {
17789 struct task_struct *tsk = current;
17790
17791 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
17792 + loadsegment(gs, 0);
17793 +#endif
17794 flush_ptrace_hw_breakpoint(tsk);
17795 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
17796 /*
17797 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
17798 regs.di = (unsigned long) arg;
17799
17800 #ifdef CONFIG_X86_32
17801 - regs.ds = __USER_DS;
17802 - regs.es = __USER_DS;
17803 + regs.ds = __KERNEL_DS;
17804 + regs.es = __KERNEL_DS;
17805 regs.fs = __KERNEL_PERCPU;
17806 - regs.gs = __KERNEL_STACK_CANARY;
17807 + savesegment(gs, regs.gs);
17808 #else
17809 regs.ss = __KERNEL_DS;
17810 #endif
17811 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
17812
17813 return ret;
17814 }
17815 -void stop_this_cpu(void *dummy)
17816 +__noreturn void stop_this_cpu(void *dummy)
17817 {
17818 local_irq_disable();
17819 /*
17820 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
17821 }
17822 early_param("idle", idle_setup);
17823
17824 -unsigned long arch_align_stack(unsigned long sp)
17825 +#ifdef CONFIG_PAX_RANDKSTACK
17826 +void pax_randomize_kstack(struct pt_regs *regs)
17827 {
17828 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
17829 - sp -= get_random_int() % 8192;
17830 - return sp & ~0xf;
17831 -}
17832 + struct thread_struct *thread = &current->thread;
17833 + unsigned long time;
17834
17835 -unsigned long arch_randomize_brk(struct mm_struct *mm)
17836 -{
17837 - unsigned long range_end = mm->brk + 0x02000000;
17838 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
17839 -}
17840 + if (!randomize_va_space)
17841 + return;
17842 +
17843 + if (v8086_mode(regs))
17844 + return;
17845
17846 + rdtscl(time);
17847 +
17848 + /* P4 seems to return a 0 LSB, ignore it */
17849 +#ifdef CONFIG_MPENTIUM4
17850 + time &= 0x3EUL;
17851 + time <<= 2;
17852 +#elif defined(CONFIG_X86_64)
17853 + time &= 0xFUL;
17854 + time <<= 4;
17855 +#else
17856 + time &= 0x1FUL;
17857 + time <<= 3;
17858 +#endif
17859 +
17860 + thread->sp0 ^= time;
17861 + load_sp0(init_tss + smp_processor_id(), thread);
17862 +
17863 +#ifdef CONFIG_X86_64
17864 + percpu_write(kernel_stack, thread->sp0);
17865 +#endif
17866 +}
17867 +#endif
17868 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
17869 index 8598296..bfadef0 100644
17870 --- a/arch/x86/kernel/process_32.c
17871 +++ b/arch/x86/kernel/process_32.c
17872 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
17873 unsigned long thread_saved_pc(struct task_struct *tsk)
17874 {
17875 return ((unsigned long *)tsk->thread.sp)[3];
17876 +//XXX return tsk->thread.eip;
17877 }
17878
17879 #ifndef CONFIG_SMP
17880 @@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
17881 unsigned long sp;
17882 unsigned short ss, gs;
17883
17884 - if (user_mode_vm(regs)) {
17885 + if (user_mode(regs)) {
17886 sp = regs->sp;
17887 ss = regs->ss & 0xffff;
17888 - gs = get_user_gs(regs);
17889 } else {
17890 sp = kernel_stack_pointer(regs);
17891 savesegment(ss, ss);
17892 - savesegment(gs, gs);
17893 }
17894 + gs = get_user_gs(regs);
17895
17896 show_regs_common();
17897
17898 @@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17899 struct task_struct *tsk;
17900 int err;
17901
17902 - childregs = task_pt_regs(p);
17903 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
17904 *childregs = *regs;
17905 childregs->ax = 0;
17906 childregs->sp = sp;
17907
17908 p->thread.sp = (unsigned long) childregs;
17909 p->thread.sp0 = (unsigned long) (childregs+1);
17910 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
17911
17912 p->thread.ip = (unsigned long) ret_from_fork;
17913
17914 @@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17915 struct thread_struct *prev = &prev_p->thread,
17916 *next = &next_p->thread;
17917 int cpu = smp_processor_id();
17918 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
17919 + struct tss_struct *tss = init_tss + cpu;
17920 fpu_switch_t fpu;
17921
17922 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
17923 @@ -320,6 +321,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17924 */
17925 lazy_save_gs(prev->gs);
17926
17927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17928 + __set_fs(task_thread_info(next_p)->addr_limit);
17929 +#endif
17930 +
17931 /*
17932 * Load the per-thread Thread-Local Storage descriptor.
17933 */
17934 @@ -350,6 +355,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17935 */
17936 arch_end_context_switch(next_p);
17937
17938 + percpu_write(current_task, next_p);
17939 + percpu_write(current_tinfo, &next_p->tinfo);
17940 +
17941 /*
17942 * Restore %gs if needed (which is common)
17943 */
17944 @@ -358,8 +366,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17945
17946 switch_fpu_finish(next_p, fpu);
17947
17948 - percpu_write(current_task, next_p);
17949 -
17950 return prev_p;
17951 }
17952
17953 @@ -389,4 +395,3 @@ unsigned long get_wchan(struct task_struct *p)
17954 } while (count++ < 16);
17955 return 0;
17956 }
17957 -
17958 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
17959 index 6a364a6..b147d11 100644
17960 --- a/arch/x86/kernel/process_64.c
17961 +++ b/arch/x86/kernel/process_64.c
17962 @@ -89,7 +89,7 @@ static void __exit_idle(void)
17963 void exit_idle(void)
17964 {
17965 /* idle loop has pid 0 */
17966 - if (current->pid)
17967 + if (task_pid_nr(current))
17968 return;
17969 __exit_idle();
17970 }
17971 @@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17972 struct pt_regs *childregs;
17973 struct task_struct *me = current;
17974
17975 - childregs = ((struct pt_regs *)
17976 - (THREAD_SIZE + task_stack_page(p))) - 1;
17977 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
17978 *childregs = *regs;
17979
17980 childregs->ax = 0;
17981 @@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
17982 p->thread.sp = (unsigned long) childregs;
17983 p->thread.sp0 = (unsigned long) (childregs+1);
17984 p->thread.usersp = me->thread.usersp;
17985 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
17986
17987 set_tsk_thread_flag(p, TIF_FORK);
17988
17989 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17990 struct thread_struct *prev = &prev_p->thread;
17991 struct thread_struct *next = &next_p->thread;
17992 int cpu = smp_processor_id();
17993 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
17994 + struct tss_struct *tss = init_tss + cpu;
17995 unsigned fsindex, gsindex;
17996 fpu_switch_t fpu;
17997
17998 @@ -461,10 +461,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
17999 prev->usersp = percpu_read(old_rsp);
18000 percpu_write(old_rsp, next->usersp);
18001 percpu_write(current_task, next_p);
18002 + percpu_write(current_tinfo, &next_p->tinfo);
18003
18004 - percpu_write(kernel_stack,
18005 - (unsigned long)task_stack_page(next_p) +
18006 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18007 + percpu_write(kernel_stack, next->sp0);
18008
18009 /*
18010 * Now maybe reload the debug registers and handle I/O bitmaps
18011 @@ -519,12 +518,11 @@ unsigned long get_wchan(struct task_struct *p)
18012 if (!p || p == current || p->state == TASK_RUNNING)
18013 return 0;
18014 stack = (unsigned long)task_stack_page(p);
18015 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18016 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18017 return 0;
18018 fp = *(u64 *)(p->thread.sp);
18019 do {
18020 - if (fp < (unsigned long)stack ||
18021 - fp >= (unsigned long)stack+THREAD_SIZE)
18022 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18023 return 0;
18024 ip = *(u64 *)(fp+8);
18025 if (!in_sched_functions(ip))
18026 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18027 index 8252879..d3219e0 100644
18028 --- a/arch/x86/kernel/ptrace.c
18029 +++ b/arch/x86/kernel/ptrace.c
18030 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
18031 unsigned long addr, unsigned long data)
18032 {
18033 int ret;
18034 - unsigned long __user *datap = (unsigned long __user *)data;
18035 + unsigned long __user *datap = (__force unsigned long __user *)data;
18036
18037 switch (request) {
18038 /* read the word at location addr in the USER area. */
18039 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
18040 if ((int) addr < 0)
18041 return -EIO;
18042 ret = do_get_thread_area(child, addr,
18043 - (struct user_desc __user *)data);
18044 + (__force struct user_desc __user *) data);
18045 break;
18046
18047 case PTRACE_SET_THREAD_AREA:
18048 if ((int) addr < 0)
18049 return -EIO;
18050 ret = do_set_thread_area(child, addr,
18051 - (struct user_desc __user *)data, 0);
18052 + (__force struct user_desc __user *) data, 0);
18053 break;
18054 #endif
18055
18056 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18057 memset(info, 0, sizeof(*info));
18058 info->si_signo = SIGTRAP;
18059 info->si_code = si_code;
18060 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18061 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18062 }
18063
18064 void user_single_step_siginfo(struct task_struct *tsk,
18065 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18066 index 42eb330..139955c 100644
18067 --- a/arch/x86/kernel/pvclock.c
18068 +++ b/arch/x86/kernel/pvclock.c
18069 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18070 return pv_tsc_khz;
18071 }
18072
18073 -static atomic64_t last_value = ATOMIC64_INIT(0);
18074 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18075
18076 void pvclock_resume(void)
18077 {
18078 - atomic64_set(&last_value, 0);
18079 + atomic64_set_unchecked(&last_value, 0);
18080 }
18081
18082 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18083 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18084 * updating at the same time, and one of them could be slightly behind,
18085 * making the assumption that last_value always go forward fail to hold.
18086 */
18087 - last = atomic64_read(&last_value);
18088 + last = atomic64_read_unchecked(&last_value);
18089 do {
18090 if (ret < last)
18091 return last;
18092 - last = atomic64_cmpxchg(&last_value, last, ret);
18093 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18094 } while (unlikely(last != ret));
18095
18096 return ret;
18097 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18098 index 37a458b..e63d183 100644
18099 --- a/arch/x86/kernel/reboot.c
18100 +++ b/arch/x86/kernel/reboot.c
18101 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18102 EXPORT_SYMBOL(pm_power_off);
18103
18104 static const struct desc_ptr no_idt = {};
18105 -static int reboot_mode;
18106 +static unsigned short reboot_mode;
18107 enum reboot_type reboot_type = BOOT_ACPI;
18108 int reboot_force;
18109
18110 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
18111 extern const unsigned char machine_real_restart_asm[];
18112 extern const u64 machine_real_restart_gdt[3];
18113
18114 -void machine_real_restart(unsigned int type)
18115 +__noreturn void machine_real_restart(unsigned int type)
18116 {
18117 void *restart_va;
18118 unsigned long restart_pa;
18119 - void (*restart_lowmem)(unsigned int);
18120 + void (* __noreturn restart_lowmem)(unsigned int);
18121 u64 *lowmem_gdt;
18122
18123 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18124 + struct desc_struct *gdt;
18125 +#endif
18126 +
18127 local_irq_disable();
18128
18129 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18130 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
18131 boot)". This seems like a fairly standard thing that gets set by
18132 REBOOT.COM programs, and the previous reset routine did this
18133 too. */
18134 - *((unsigned short *)0x472) = reboot_mode;
18135 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18136
18137 /* Patch the GDT in the low memory trampoline */
18138 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18139
18140 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18141 restart_pa = virt_to_phys(restart_va);
18142 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18143 + restart_lowmem = (void *)restart_pa;
18144
18145 /* GDT[0]: GDT self-pointer */
18146 lowmem_gdt[0] =
18147 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
18148 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18149
18150 /* Jump to the identity-mapped low memory code */
18151 +
18152 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18153 + gdt = get_cpu_gdt_table(smp_processor_id());
18154 + pax_open_kernel();
18155 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18156 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18157 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18158 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18159 +#endif
18160 +#ifdef CONFIG_PAX_KERNEXEC
18161 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18162 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18163 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18164 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18165 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18166 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18167 +#endif
18168 + pax_close_kernel();
18169 +#endif
18170 +
18171 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18172 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18173 + unreachable();
18174 +#else
18175 restart_lowmem(type);
18176 +#endif
18177 +
18178 }
18179 #ifdef CONFIG_APM_MODULE
18180 EXPORT_SYMBOL(machine_real_restart);
18181 @@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18182 * try to force a triple fault and then cycle between hitting the keyboard
18183 * controller and doing that
18184 */
18185 -static void native_machine_emergency_restart(void)
18186 +__noreturn static void native_machine_emergency_restart(void)
18187 {
18188 int i;
18189 int attempt = 0;
18190 @@ -664,13 +694,13 @@ void native_machine_shutdown(void)
18191 #endif
18192 }
18193
18194 -static void __machine_emergency_restart(int emergency)
18195 +static __noreturn void __machine_emergency_restart(int emergency)
18196 {
18197 reboot_emergency = emergency;
18198 machine_ops.emergency_restart();
18199 }
18200
18201 -static void native_machine_restart(char *__unused)
18202 +static __noreturn void native_machine_restart(char *__unused)
18203 {
18204 printk("machine restart\n");
18205
18206 @@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
18207 __machine_emergency_restart(0);
18208 }
18209
18210 -static void native_machine_halt(void)
18211 +static __noreturn void native_machine_halt(void)
18212 {
18213 /* stop other cpus and apics */
18214 machine_shutdown();
18215 @@ -690,7 +720,7 @@ static void native_machine_halt(void)
18216 stop_this_cpu(NULL);
18217 }
18218
18219 -static void native_machine_power_off(void)
18220 +__noreturn static void native_machine_power_off(void)
18221 {
18222 if (pm_power_off) {
18223 if (!reboot_force)
18224 @@ -699,6 +729,7 @@ static void native_machine_power_off(void)
18225 }
18226 /* a fallback in case there is no PM info available */
18227 tboot_shutdown(TB_SHUTDOWN_HALT);
18228 + unreachable();
18229 }
18230
18231 struct machine_ops machine_ops = {
18232 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18233 index 7a6f3b3..bed145d7 100644
18234 --- a/arch/x86/kernel/relocate_kernel_64.S
18235 +++ b/arch/x86/kernel/relocate_kernel_64.S
18236 @@ -11,6 +11,7 @@
18237 #include <asm/kexec.h>
18238 #include <asm/processor-flags.h>
18239 #include <asm/pgtable_types.h>
18240 +#include <asm/alternative-asm.h>
18241
18242 /*
18243 * Must be relocatable PIC code callable as a C function
18244 @@ -160,13 +161,14 @@ identity_mapped:
18245 xorq %rbp, %rbp
18246 xorq %r8, %r8
18247 xorq %r9, %r9
18248 - xorq %r10, %r9
18249 + xorq %r10, %r10
18250 xorq %r11, %r11
18251 xorq %r12, %r12
18252 xorq %r13, %r13
18253 xorq %r14, %r14
18254 xorq %r15, %r15
18255
18256 + pax_force_retaddr 0, 1
18257 ret
18258
18259 1:
18260 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18261 index cf0ef98..e3f780b 100644
18262 --- a/arch/x86/kernel/setup.c
18263 +++ b/arch/x86/kernel/setup.c
18264 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
18265
18266 switch (data->type) {
18267 case SETUP_E820_EXT:
18268 - parse_e820_ext(data);
18269 + parse_e820_ext((struct setup_data __force_kernel *)data);
18270 break;
18271 case SETUP_DTB:
18272 add_dtb(pa_data);
18273 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
18274 * area (640->1Mb) as ram even though it is not.
18275 * take them out.
18276 */
18277 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18278 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18279 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18280 }
18281
18282 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
18283
18284 if (!boot_params.hdr.root_flags)
18285 root_mountflags &= ~MS_RDONLY;
18286 - init_mm.start_code = (unsigned long) _text;
18287 - init_mm.end_code = (unsigned long) _etext;
18288 + init_mm.start_code = ktla_ktva((unsigned long) _text);
18289 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
18290 init_mm.end_data = (unsigned long) _edata;
18291 init_mm.brk = _brk_end;
18292
18293 - code_resource.start = virt_to_phys(_text);
18294 - code_resource.end = virt_to_phys(_etext)-1;
18295 - data_resource.start = virt_to_phys(_etext);
18296 + code_resource.start = virt_to_phys(ktla_ktva(_text));
18297 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18298 + data_resource.start = virt_to_phys(_sdata);
18299 data_resource.end = virt_to_phys(_edata)-1;
18300 bss_resource.start = virt_to_phys(&__bss_start);
18301 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18302 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18303 index 71f4727..16dc9f7 100644
18304 --- a/arch/x86/kernel/setup_percpu.c
18305 +++ b/arch/x86/kernel/setup_percpu.c
18306 @@ -21,19 +21,17 @@
18307 #include <asm/cpu.h>
18308 #include <asm/stackprotector.h>
18309
18310 -DEFINE_PER_CPU(int, cpu_number);
18311 +#ifdef CONFIG_SMP
18312 +DEFINE_PER_CPU(unsigned int, cpu_number);
18313 EXPORT_PER_CPU_SYMBOL(cpu_number);
18314 +#endif
18315
18316 -#ifdef CONFIG_X86_64
18317 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
18318 -#else
18319 -#define BOOT_PERCPU_OFFSET 0
18320 -#endif
18321
18322 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
18323 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
18324
18325 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
18326 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
18327 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
18328 };
18329 EXPORT_SYMBOL(__per_cpu_offset);
18330 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
18331 {
18332 #ifdef CONFIG_X86_32
18333 struct desc_struct gdt;
18334 + unsigned long base = per_cpu_offset(cpu);
18335
18336 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
18337 - 0x2 | DESCTYPE_S, 0x8);
18338 - gdt.s = 1;
18339 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
18340 + 0x83 | DESCTYPE_S, 0xC);
18341 write_gdt_entry(get_cpu_gdt_table(cpu),
18342 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
18343 #endif
18344 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
18345 /* alrighty, percpu areas up and running */
18346 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
18347 for_each_possible_cpu(cpu) {
18348 +#ifdef CONFIG_CC_STACKPROTECTOR
18349 +#ifdef CONFIG_X86_32
18350 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
18351 +#endif
18352 +#endif
18353 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
18354 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
18355 per_cpu(cpu_number, cpu) = cpu;
18356 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
18357 */
18358 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
18359 #endif
18360 +#ifdef CONFIG_CC_STACKPROTECTOR
18361 +#ifdef CONFIG_X86_32
18362 + if (!cpu)
18363 + per_cpu(stack_canary.canary, cpu) = canary;
18364 +#endif
18365 +#endif
18366 /*
18367 * Up to this point, the boot CPU has been using .init.data
18368 * area. Reload any changed state for the boot CPU.
18369 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
18370 index 54ddaeb2..22c3bdc 100644
18371 --- a/arch/x86/kernel/signal.c
18372 +++ b/arch/x86/kernel/signal.c
18373 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
18374 * Align the stack pointer according to the i386 ABI,
18375 * i.e. so that on function entry ((sp + 4) & 15) == 0.
18376 */
18377 - sp = ((sp + 4) & -16ul) - 4;
18378 + sp = ((sp - 12) & -16ul) - 4;
18379 #else /* !CONFIG_X86_32 */
18380 sp = round_down(sp, 16) - 8;
18381 #endif
18382 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
18383 * Return an always-bogus address instead so we will die with SIGSEGV.
18384 */
18385 if (onsigstack && !likely(on_sig_stack(sp)))
18386 - return (void __user *)-1L;
18387 + return (__force void __user *)-1L;
18388
18389 /* save i387 state */
18390 if (used_math() && save_i387_xstate(*fpstate) < 0)
18391 - return (void __user *)-1L;
18392 + return (__force void __user *)-1L;
18393
18394 return (void __user *)sp;
18395 }
18396 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18397 }
18398
18399 if (current->mm->context.vdso)
18400 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18401 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18402 else
18403 - restorer = &frame->retcode;
18404 + restorer = (void __user *)&frame->retcode;
18405 if (ka->sa.sa_flags & SA_RESTORER)
18406 restorer = ka->sa.sa_restorer;
18407
18408 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18409 * reasons and because gdb uses it as a signature to notice
18410 * signal handler stack frames.
18411 */
18412 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
18413 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
18414
18415 if (err)
18416 return -EFAULT;
18417 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18418 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
18419
18420 /* Set up to return from userspace. */
18421 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18422 + if (current->mm->context.vdso)
18423 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18424 + else
18425 + restorer = (void __user *)&frame->retcode;
18426 if (ka->sa.sa_flags & SA_RESTORER)
18427 restorer = ka->sa.sa_restorer;
18428 put_user_ex(restorer, &frame->pretcode);
18429 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18430 * reasons and because gdb uses it as a signature to notice
18431 * signal handler stack frames.
18432 */
18433 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
18434 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
18435 } put_user_catch(err);
18436
18437 if (err)
18438 @@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
18439 * X86_32: vm86 regs switched out by assembly code before reaching
18440 * here, so testing against kernel CS suffices.
18441 */
18442 - if (!user_mode(regs))
18443 + if (!user_mode_novm(regs))
18444 return;
18445
18446 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
18447 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18448 index 9f548cb..caf76f7 100644
18449 --- a/arch/x86/kernel/smpboot.c
18450 +++ b/arch/x86/kernel/smpboot.c
18451 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
18452 set_idle_for_cpu(cpu, c_idle.idle);
18453 do_rest:
18454 per_cpu(current_task, cpu) = c_idle.idle;
18455 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
18456 #ifdef CONFIG_X86_32
18457 /* Stack for startup_32 can be just as for start_secondary onwards */
18458 irq_ctx_init(cpu);
18459 #else
18460 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
18461 initial_gs = per_cpu_offset(cpu);
18462 - per_cpu(kernel_stack, cpu) =
18463 - (unsigned long)task_stack_page(c_idle.idle) -
18464 - KERNEL_STACK_OFFSET + THREAD_SIZE;
18465 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
18466 #endif
18467 +
18468 + pax_open_kernel();
18469 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18470 + pax_close_kernel();
18471 +
18472 initial_code = (unsigned long)start_secondary;
18473 stack_start = c_idle.idle->thread.sp;
18474
18475 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
18476
18477 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
18478
18479 +#ifdef CONFIG_PAX_PER_CPU_PGD
18480 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
18481 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18482 + KERNEL_PGD_PTRS);
18483 +#endif
18484 +
18485 err = do_boot_cpu(apicid, cpu);
18486 if (err) {
18487 pr_debug("do_boot_cpu failed %d\n", err);
18488 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
18489 index c346d11..d43b163 100644
18490 --- a/arch/x86/kernel/step.c
18491 +++ b/arch/x86/kernel/step.c
18492 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18493 struct desc_struct *desc;
18494 unsigned long base;
18495
18496 - seg &= ~7UL;
18497 + seg >>= 3;
18498
18499 mutex_lock(&child->mm->context.lock);
18500 - if (unlikely((seg >> 3) >= child->mm->context.size))
18501 + if (unlikely(seg >= child->mm->context.size))
18502 addr = -1L; /* bogus selector, access would fault */
18503 else {
18504 desc = child->mm->context.ldt + seg;
18505 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18506 addr += base;
18507 }
18508 mutex_unlock(&child->mm->context.lock);
18509 - }
18510 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
18511 + addr = ktla_ktva(addr);
18512
18513 return addr;
18514 }
18515 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
18516 unsigned char opcode[15];
18517 unsigned long addr = convert_ip_to_linear(child, regs);
18518
18519 + if (addr == -EINVAL)
18520 + return 0;
18521 +
18522 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
18523 for (i = 0; i < copied; i++) {
18524 switch (opcode[i]) {
18525 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
18526 index 0b0cb5f..db6b9ed 100644
18527 --- a/arch/x86/kernel/sys_i386_32.c
18528 +++ b/arch/x86/kernel/sys_i386_32.c
18529 @@ -24,17 +24,224 @@
18530
18531 #include <asm/syscalls.h>
18532
18533 -/*
18534 - * Do a system call from kernel instead of calling sys_execve so we
18535 - * end up with proper pt_regs.
18536 - */
18537 -int kernel_execve(const char *filename,
18538 - const char *const argv[],
18539 - const char *const envp[])
18540 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
18541 {
18542 - long __res;
18543 - asm volatile ("int $0x80"
18544 - : "=a" (__res)
18545 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
18546 - return __res;
18547 + unsigned long pax_task_size = TASK_SIZE;
18548 +
18549 +#ifdef CONFIG_PAX_SEGMEXEC
18550 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
18551 + pax_task_size = SEGMEXEC_TASK_SIZE;
18552 +#endif
18553 +
18554 + if (len > pax_task_size || addr > pax_task_size - len)
18555 + return -EINVAL;
18556 +
18557 + return 0;
18558 +}
18559 +
18560 +unsigned long
18561 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
18562 + unsigned long len, unsigned long pgoff, unsigned long flags)
18563 +{
18564 + struct mm_struct *mm = current->mm;
18565 + struct vm_area_struct *vma;
18566 + unsigned long start_addr, pax_task_size = TASK_SIZE;
18567 +
18568 +#ifdef CONFIG_PAX_SEGMEXEC
18569 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18570 + pax_task_size = SEGMEXEC_TASK_SIZE;
18571 +#endif
18572 +
18573 + pax_task_size -= PAGE_SIZE;
18574 +
18575 + if (len > pax_task_size)
18576 + return -ENOMEM;
18577 +
18578 + if (flags & MAP_FIXED)
18579 + return addr;
18580 +
18581 +#ifdef CONFIG_PAX_RANDMMAP
18582 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18583 +#endif
18584 +
18585 + if (addr) {
18586 + addr = PAGE_ALIGN(addr);
18587 + if (pax_task_size - len >= addr) {
18588 + vma = find_vma(mm, addr);
18589 + if (check_heap_stack_gap(vma, addr, len))
18590 + return addr;
18591 + }
18592 + }
18593 + if (len > mm->cached_hole_size) {
18594 + start_addr = addr = mm->free_area_cache;
18595 + } else {
18596 + start_addr = addr = mm->mmap_base;
18597 + mm->cached_hole_size = 0;
18598 + }
18599 +
18600 +#ifdef CONFIG_PAX_PAGEEXEC
18601 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
18602 + start_addr = 0x00110000UL;
18603 +
18604 +#ifdef CONFIG_PAX_RANDMMAP
18605 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18606 + start_addr += mm->delta_mmap & 0x03FFF000UL;
18607 +#endif
18608 +
18609 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
18610 + start_addr = addr = mm->mmap_base;
18611 + else
18612 + addr = start_addr;
18613 + }
18614 +#endif
18615 +
18616 +full_search:
18617 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
18618 + /* At this point: (!vma || addr < vma->vm_end). */
18619 + if (pax_task_size - len < addr) {
18620 + /*
18621 + * Start a new search - just in case we missed
18622 + * some holes.
18623 + */
18624 + if (start_addr != mm->mmap_base) {
18625 + start_addr = addr = mm->mmap_base;
18626 + mm->cached_hole_size = 0;
18627 + goto full_search;
18628 + }
18629 + return -ENOMEM;
18630 + }
18631 + if (check_heap_stack_gap(vma, addr, len))
18632 + break;
18633 + if (addr + mm->cached_hole_size < vma->vm_start)
18634 + mm->cached_hole_size = vma->vm_start - addr;
18635 + addr = vma->vm_end;
18636 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
18637 + start_addr = addr = mm->mmap_base;
18638 + mm->cached_hole_size = 0;
18639 + goto full_search;
18640 + }
18641 + }
18642 +
18643 + /*
18644 + * Remember the place where we stopped the search:
18645 + */
18646 + mm->free_area_cache = addr + len;
18647 + return addr;
18648 +}
18649 +
18650 +unsigned long
18651 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18652 + const unsigned long len, const unsigned long pgoff,
18653 + const unsigned long flags)
18654 +{
18655 + struct vm_area_struct *vma;
18656 + struct mm_struct *mm = current->mm;
18657 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
18658 +
18659 +#ifdef CONFIG_PAX_SEGMEXEC
18660 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18661 + pax_task_size = SEGMEXEC_TASK_SIZE;
18662 +#endif
18663 +
18664 + pax_task_size -= PAGE_SIZE;
18665 +
18666 + /* requested length too big for entire address space */
18667 + if (len > pax_task_size)
18668 + return -ENOMEM;
18669 +
18670 + if (flags & MAP_FIXED)
18671 + return addr;
18672 +
18673 +#ifdef CONFIG_PAX_PAGEEXEC
18674 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
18675 + goto bottomup;
18676 +#endif
18677 +
18678 +#ifdef CONFIG_PAX_RANDMMAP
18679 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18680 +#endif
18681 +
18682 + /* requesting a specific address */
18683 + if (addr) {
18684 + addr = PAGE_ALIGN(addr);
18685 + if (pax_task_size - len >= addr) {
18686 + vma = find_vma(mm, addr);
18687 + if (check_heap_stack_gap(vma, addr, len))
18688 + return addr;
18689 + }
18690 + }
18691 +
18692 + /* check if free_area_cache is useful for us */
18693 + if (len <= mm->cached_hole_size) {
18694 + mm->cached_hole_size = 0;
18695 + mm->free_area_cache = mm->mmap_base;
18696 + }
18697 +
18698 + /* either no address requested or can't fit in requested address hole */
18699 + addr = mm->free_area_cache;
18700 +
18701 + /* make sure it can fit in the remaining address space */
18702 + if (addr > len) {
18703 + vma = find_vma(mm, addr-len);
18704 + if (check_heap_stack_gap(vma, addr - len, len))
18705 + /* remember the address as a hint for next time */
18706 + return (mm->free_area_cache = addr-len);
18707 + }
18708 +
18709 + if (mm->mmap_base < len)
18710 + goto bottomup;
18711 +
18712 + addr = mm->mmap_base-len;
18713 +
18714 + do {
18715 + /*
18716 + * Lookup failure means no vma is above this address,
18717 + * else if new region fits below vma->vm_start,
18718 + * return with success:
18719 + */
18720 + vma = find_vma(mm, addr);
18721 + if (check_heap_stack_gap(vma, addr, len))
18722 + /* remember the address as a hint for next time */
18723 + return (mm->free_area_cache = addr);
18724 +
18725 + /* remember the largest hole we saw so far */
18726 + if (addr + mm->cached_hole_size < vma->vm_start)
18727 + mm->cached_hole_size = vma->vm_start - addr;
18728 +
18729 + /* try just below the current vma->vm_start */
18730 + addr = skip_heap_stack_gap(vma, len);
18731 + } while (!IS_ERR_VALUE(addr));
18732 +
18733 +bottomup:
18734 + /*
18735 + * A failed mmap() very likely causes application failure,
18736 + * so fall back to the bottom-up function here. This scenario
18737 + * can happen with large stack limits and large mmap()
18738 + * allocations.
18739 + */
18740 +
18741 +#ifdef CONFIG_PAX_SEGMEXEC
18742 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18743 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
18744 + else
18745 +#endif
18746 +
18747 + mm->mmap_base = TASK_UNMAPPED_BASE;
18748 +
18749 +#ifdef CONFIG_PAX_RANDMMAP
18750 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18751 + mm->mmap_base += mm->delta_mmap;
18752 +#endif
18753 +
18754 + mm->free_area_cache = mm->mmap_base;
18755 + mm->cached_hole_size = ~0UL;
18756 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18757 + /*
18758 + * Restore the topdown base:
18759 + */
18760 + mm->mmap_base = base;
18761 + mm->free_area_cache = base;
18762 + mm->cached_hole_size = ~0UL;
18763 +
18764 + return addr;
18765 }
18766 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
18767 index 0514890..3dbebce 100644
18768 --- a/arch/x86/kernel/sys_x86_64.c
18769 +++ b/arch/x86/kernel/sys_x86_64.c
18770 @@ -95,8 +95,8 @@ out:
18771 return error;
18772 }
18773
18774 -static void find_start_end(unsigned long flags, unsigned long *begin,
18775 - unsigned long *end)
18776 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
18777 + unsigned long *begin, unsigned long *end)
18778 {
18779 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
18780 unsigned long new_begin;
18781 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
18782 *begin = new_begin;
18783 }
18784 } else {
18785 - *begin = TASK_UNMAPPED_BASE;
18786 + *begin = mm->mmap_base;
18787 *end = TASK_SIZE;
18788 }
18789 }
18790 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
18791 if (flags & MAP_FIXED)
18792 return addr;
18793
18794 - find_start_end(flags, &begin, &end);
18795 + find_start_end(mm, flags, &begin, &end);
18796
18797 if (len > end)
18798 return -ENOMEM;
18799
18800 +#ifdef CONFIG_PAX_RANDMMAP
18801 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18802 +#endif
18803 +
18804 if (addr) {
18805 addr = PAGE_ALIGN(addr);
18806 vma = find_vma(mm, addr);
18807 - if (end - len >= addr &&
18808 - (!vma || addr + len <= vma->vm_start))
18809 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
18810 return addr;
18811 }
18812 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
18813 @@ -172,7 +175,7 @@ full_search:
18814 }
18815 return -ENOMEM;
18816 }
18817 - if (!vma || addr + len <= vma->vm_start) {
18818 + if (check_heap_stack_gap(vma, addr, len)) {
18819 /*
18820 * Remember the place where we stopped the search:
18821 */
18822 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18823 {
18824 struct vm_area_struct *vma;
18825 struct mm_struct *mm = current->mm;
18826 - unsigned long addr = addr0;
18827 + unsigned long base = mm->mmap_base, addr = addr0;
18828
18829 /* requested length too big for entire address space */
18830 if (len > TASK_SIZE)
18831 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18832 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
18833 goto bottomup;
18834
18835 +#ifdef CONFIG_PAX_RANDMMAP
18836 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18837 +#endif
18838 +
18839 /* requesting a specific address */
18840 if (addr) {
18841 addr = PAGE_ALIGN(addr);
18842 - vma = find_vma(mm, addr);
18843 - if (TASK_SIZE - len >= addr &&
18844 - (!vma || addr + len <= vma->vm_start))
18845 - return addr;
18846 + if (TASK_SIZE - len >= addr) {
18847 + vma = find_vma(mm, addr);
18848 + if (check_heap_stack_gap(vma, addr, len))
18849 + return addr;
18850 + }
18851 }
18852
18853 /* check if free_area_cache is useful for us */
18854 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18855 ALIGN_TOPDOWN);
18856
18857 vma = find_vma(mm, tmp_addr);
18858 - if (!vma || tmp_addr + len <= vma->vm_start)
18859 + if (check_heap_stack_gap(vma, tmp_addr, len))
18860 /* remember the address as a hint for next time */
18861 return mm->free_area_cache = tmp_addr;
18862 }
18863 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18864 * return with success:
18865 */
18866 vma = find_vma(mm, addr);
18867 - if (!vma || addr+len <= vma->vm_start)
18868 + if (check_heap_stack_gap(vma, addr, len))
18869 /* remember the address as a hint for next time */
18870 return mm->free_area_cache = addr;
18871
18872 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18873 mm->cached_hole_size = vma->vm_start - addr;
18874
18875 /* try just below the current vma->vm_start */
18876 - addr = vma->vm_start-len;
18877 - } while (len < vma->vm_start);
18878 + addr = skip_heap_stack_gap(vma, len);
18879 + } while (!IS_ERR_VALUE(addr));
18880
18881 bottomup:
18882 /*
18883 @@ -270,13 +278,21 @@ bottomup:
18884 * can happen with large stack limits and large mmap()
18885 * allocations.
18886 */
18887 + mm->mmap_base = TASK_UNMAPPED_BASE;
18888 +
18889 +#ifdef CONFIG_PAX_RANDMMAP
18890 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18891 + mm->mmap_base += mm->delta_mmap;
18892 +#endif
18893 +
18894 + mm->free_area_cache = mm->mmap_base;
18895 mm->cached_hole_size = ~0UL;
18896 - mm->free_area_cache = TASK_UNMAPPED_BASE;
18897 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
18898 /*
18899 * Restore the topdown base:
18900 */
18901 - mm->free_area_cache = mm->mmap_base;
18902 + mm->mmap_base = base;
18903 + mm->free_area_cache = base;
18904 mm->cached_hole_size = ~0UL;
18905
18906 return addr;
18907 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
18908 index 9a0e312..e6f66f2 100644
18909 --- a/arch/x86/kernel/syscall_table_32.S
18910 +++ b/arch/x86/kernel/syscall_table_32.S
18911 @@ -1,3 +1,4 @@
18912 +.section .rodata,"a",@progbits
18913 ENTRY(sys_call_table)
18914 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
18915 .long sys_exit
18916 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
18917 index e2410e2..4fe3fbc 100644
18918 --- a/arch/x86/kernel/tboot.c
18919 +++ b/arch/x86/kernel/tboot.c
18920 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
18921
18922 void tboot_shutdown(u32 shutdown_type)
18923 {
18924 - void (*shutdown)(void);
18925 + void (* __noreturn shutdown)(void);
18926
18927 if (!tboot_enabled())
18928 return;
18929 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
18930
18931 switch_to_tboot_pt();
18932
18933 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
18934 + shutdown = (void *)tboot->shutdown_entry;
18935 shutdown();
18936
18937 /* should not reach here */
18938 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
18939 tboot_shutdown(acpi_shutdown_map[sleep_state]);
18940 }
18941
18942 -static atomic_t ap_wfs_count;
18943 +static atomic_unchecked_t ap_wfs_count;
18944
18945 static int tboot_wait_for_aps(int num_aps)
18946 {
18947 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
18948 {
18949 switch (action) {
18950 case CPU_DYING:
18951 - atomic_inc(&ap_wfs_count);
18952 + atomic_inc_unchecked(&ap_wfs_count);
18953 if (num_online_cpus() == 1)
18954 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
18955 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
18956 return NOTIFY_BAD;
18957 break;
18958 }
18959 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
18960
18961 tboot_create_trampoline();
18962
18963 - atomic_set(&ap_wfs_count, 0);
18964 + atomic_set_unchecked(&ap_wfs_count, 0);
18965 register_hotcpu_notifier(&tboot_cpu_notifier);
18966 return 0;
18967 }
18968 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
18969 index dd5fbf4..b7f2232 100644
18970 --- a/arch/x86/kernel/time.c
18971 +++ b/arch/x86/kernel/time.c
18972 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
18973 {
18974 unsigned long pc = instruction_pointer(regs);
18975
18976 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
18977 + if (!user_mode(regs) && in_lock_functions(pc)) {
18978 #ifdef CONFIG_FRAME_POINTER
18979 - return *(unsigned long *)(regs->bp + sizeof(long));
18980 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
18981 #else
18982 unsigned long *sp =
18983 (unsigned long *)kernel_stack_pointer(regs);
18984 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
18985 * or above a saved flags. Eflags has bits 22-31 zero,
18986 * kernel addresses don't.
18987 */
18988 +
18989 +#ifdef CONFIG_PAX_KERNEXEC
18990 + return ktla_ktva(sp[0]);
18991 +#else
18992 if (sp[0] >> 22)
18993 return sp[0];
18994 if (sp[1] >> 22)
18995 return sp[1];
18996 #endif
18997 +
18998 +#endif
18999 }
19000 return pc;
19001 }
19002 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19003 index 6bb7b85..dd853e1 100644
19004 --- a/arch/x86/kernel/tls.c
19005 +++ b/arch/x86/kernel/tls.c
19006 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19007 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19008 return -EINVAL;
19009
19010 +#ifdef CONFIG_PAX_SEGMEXEC
19011 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19012 + return -EINVAL;
19013 +#endif
19014 +
19015 set_tls_desc(p, idx, &info, 1);
19016
19017 return 0;
19018 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19019 index 451c0a7..e57f551 100644
19020 --- a/arch/x86/kernel/trampoline_32.S
19021 +++ b/arch/x86/kernel/trampoline_32.S
19022 @@ -32,6 +32,12 @@
19023 #include <asm/segment.h>
19024 #include <asm/page_types.h>
19025
19026 +#ifdef CONFIG_PAX_KERNEXEC
19027 +#define ta(X) (X)
19028 +#else
19029 +#define ta(X) ((X) - __PAGE_OFFSET)
19030 +#endif
19031 +
19032 #ifdef CONFIG_SMP
19033
19034 .section ".x86_trampoline","a"
19035 @@ -62,7 +68,7 @@ r_base = .
19036 inc %ax # protected mode (PE) bit
19037 lmsw %ax # into protected mode
19038 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19039 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19040 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19041
19042 # These need to be in the same 64K segment as the above;
19043 # hence we don't use the boot_gdt_descr defined in head.S
19044 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19045 index 09ff517..df19fbff 100644
19046 --- a/arch/x86/kernel/trampoline_64.S
19047 +++ b/arch/x86/kernel/trampoline_64.S
19048 @@ -90,7 +90,7 @@ startup_32:
19049 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19050 movl %eax, %ds
19051
19052 - movl $X86_CR4_PAE, %eax
19053 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19054 movl %eax, %cr4 # Enable PAE mode
19055
19056 # Setup trampoline 4 level pagetables
19057 @@ -138,7 +138,7 @@ tidt:
19058 # so the kernel can live anywhere
19059 .balign 4
19060 tgdt:
19061 - .short tgdt_end - tgdt # gdt limit
19062 + .short tgdt_end - tgdt - 1 # gdt limit
19063 .long tgdt - r_base
19064 .short 0
19065 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19066 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19067 index 31d9d0f..e244dd9 100644
19068 --- a/arch/x86/kernel/traps.c
19069 +++ b/arch/x86/kernel/traps.c
19070 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19071
19072 /* Do we ignore FPU interrupts ? */
19073 char ignore_fpu_irq;
19074 -
19075 -/*
19076 - * The IDT has to be page-aligned to simplify the Pentium
19077 - * F0 0F bug workaround.
19078 - */
19079 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19080 #endif
19081
19082 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19083 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19084 }
19085
19086 static void __kprobes
19087 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19088 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19089 long error_code, siginfo_t *info)
19090 {
19091 struct task_struct *tsk = current;
19092
19093 #ifdef CONFIG_X86_32
19094 - if (regs->flags & X86_VM_MASK) {
19095 + if (v8086_mode(regs)) {
19096 /*
19097 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19098 * On nmi (interrupt 2), do_trap should not be called.
19099 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19100 }
19101 #endif
19102
19103 - if (!user_mode(regs))
19104 + if (!user_mode_novm(regs))
19105 goto kernel_trap;
19106
19107 #ifdef CONFIG_X86_32
19108 @@ -148,7 +142,7 @@ trap_signal:
19109 printk_ratelimit()) {
19110 printk(KERN_INFO
19111 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19112 - tsk->comm, tsk->pid, str,
19113 + tsk->comm, task_pid_nr(tsk), str,
19114 regs->ip, regs->sp, error_code);
19115 print_vma_addr(" in ", regs->ip);
19116 printk("\n");
19117 @@ -165,8 +159,20 @@ kernel_trap:
19118 if (!fixup_exception(regs)) {
19119 tsk->thread.error_code = error_code;
19120 tsk->thread.trap_no = trapnr;
19121 +
19122 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19123 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19124 + str = "PAX: suspicious stack segment fault";
19125 +#endif
19126 +
19127 die(str, regs, error_code);
19128 }
19129 +
19130 +#ifdef CONFIG_PAX_REFCOUNT
19131 + if (trapnr == 4)
19132 + pax_report_refcount_overflow(regs);
19133 +#endif
19134 +
19135 return;
19136
19137 #ifdef CONFIG_X86_32
19138 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19139 conditional_sti(regs);
19140
19141 #ifdef CONFIG_X86_32
19142 - if (regs->flags & X86_VM_MASK)
19143 + if (v8086_mode(regs))
19144 goto gp_in_vm86;
19145 #endif
19146
19147 tsk = current;
19148 - if (!user_mode(regs))
19149 + if (!user_mode_novm(regs))
19150 goto gp_in_kernel;
19151
19152 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19153 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19154 + struct mm_struct *mm = tsk->mm;
19155 + unsigned long limit;
19156 +
19157 + down_write(&mm->mmap_sem);
19158 + limit = mm->context.user_cs_limit;
19159 + if (limit < TASK_SIZE) {
19160 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19161 + up_write(&mm->mmap_sem);
19162 + return;
19163 + }
19164 + up_write(&mm->mmap_sem);
19165 + }
19166 +#endif
19167 +
19168 tsk->thread.error_code = error_code;
19169 tsk->thread.trap_no = 13;
19170
19171 @@ -295,6 +317,13 @@ gp_in_kernel:
19172 if (notify_die(DIE_GPF, "general protection fault", regs,
19173 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19174 return;
19175 +
19176 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19177 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19178 + die("PAX: suspicious general protection fault", regs, error_code);
19179 + else
19180 +#endif
19181 +
19182 die("general protection fault", regs, error_code);
19183 }
19184
19185 @@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19186 /* It's safe to allow irq's after DR6 has been saved */
19187 preempt_conditional_sti(regs);
19188
19189 - if (regs->flags & X86_VM_MASK) {
19190 + if (v8086_mode(regs)) {
19191 handle_vm86_trap((struct kernel_vm86_regs *) regs,
19192 error_code, 1);
19193 preempt_conditional_cli(regs);
19194 @@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19195 * We already checked v86 mode above, so we can check for kernel mode
19196 * by just checking the CPL of CS.
19197 */
19198 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19199 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19200 tsk->thread.debugreg6 &= ~DR_STEP;
19201 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19202 regs->flags &= ~X86_EFLAGS_TF;
19203 @@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19204 return;
19205 conditional_sti(regs);
19206
19207 - if (!user_mode_vm(regs))
19208 + if (!user_mode(regs))
19209 {
19210 if (!fixup_exception(regs)) {
19211 task->thread.error_code = error_code;
19212 @@ -569,8 +598,8 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
19213 void __math_state_restore(struct task_struct *tsk)
19214 {
19215 /* We need a safe address that is cheap to find and that is already
19216 - in L1. We've just brought in "tsk->thread.has_fpu", so use that */
19217 -#define safe_address (tsk->thread.has_fpu)
19218 + in L1. */
19219 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
19220
19221 /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
19222 is pending. Clear the x87 state here by setting it to fixed
19223 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19224 index b9242ba..50c5edd 100644
19225 --- a/arch/x86/kernel/verify_cpu.S
19226 +++ b/arch/x86/kernel/verify_cpu.S
19227 @@ -20,6 +20,7 @@
19228 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19229 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19230 * arch/x86/kernel/head_32.S: processor startup
19231 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19232 *
19233 * verify_cpu, returns the status of longmode and SSE in register %eax.
19234 * 0: Success 1: Failure
19235 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19236 index 863f875..4307295 100644
19237 --- a/arch/x86/kernel/vm86_32.c
19238 +++ b/arch/x86/kernel/vm86_32.c
19239 @@ -41,6 +41,7 @@
19240 #include <linux/ptrace.h>
19241 #include <linux/audit.h>
19242 #include <linux/stddef.h>
19243 +#include <linux/grsecurity.h>
19244
19245 #include <asm/uaccess.h>
19246 #include <asm/io.h>
19247 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19248 do_exit(SIGSEGV);
19249 }
19250
19251 - tss = &per_cpu(init_tss, get_cpu());
19252 + tss = init_tss + get_cpu();
19253 current->thread.sp0 = current->thread.saved_sp0;
19254 current->thread.sysenter_cs = __KERNEL_CS;
19255 load_sp0(tss, &current->thread);
19256 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19257 struct task_struct *tsk;
19258 int tmp, ret = -EPERM;
19259
19260 +#ifdef CONFIG_GRKERNSEC_VM86
19261 + if (!capable(CAP_SYS_RAWIO)) {
19262 + gr_handle_vm86();
19263 + goto out;
19264 + }
19265 +#endif
19266 +
19267 tsk = current;
19268 if (tsk->thread.saved_sp0)
19269 goto out;
19270 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19271 int tmp, ret;
19272 struct vm86plus_struct __user *v86;
19273
19274 +#ifdef CONFIG_GRKERNSEC_VM86
19275 + if (!capable(CAP_SYS_RAWIO)) {
19276 + gr_handle_vm86();
19277 + ret = -EPERM;
19278 + goto out;
19279 + }
19280 +#endif
19281 +
19282 tsk = current;
19283 switch (cmd) {
19284 case VM86_REQUEST_IRQ:
19285 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19286 tsk->thread.saved_fs = info->regs32->fs;
19287 tsk->thread.saved_gs = get_user_gs(info->regs32);
19288
19289 - tss = &per_cpu(init_tss, get_cpu());
19290 + tss = init_tss + get_cpu();
19291 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19292 if (cpu_has_sep)
19293 tsk->thread.sysenter_cs = 0;
19294 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19295 goto cannot_handle;
19296 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19297 goto cannot_handle;
19298 - intr_ptr = (unsigned long __user *) (i << 2);
19299 + intr_ptr = (__force unsigned long __user *) (i << 2);
19300 if (get_user(segoffs, intr_ptr))
19301 goto cannot_handle;
19302 if ((segoffs >> 16) == BIOSSEG)
19303 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19304 index 0f703f1..9e15f64 100644
19305 --- a/arch/x86/kernel/vmlinux.lds.S
19306 +++ b/arch/x86/kernel/vmlinux.lds.S
19307 @@ -26,6 +26,13 @@
19308 #include <asm/page_types.h>
19309 #include <asm/cache.h>
19310 #include <asm/boot.h>
19311 +#include <asm/segment.h>
19312 +
19313 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19314 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19315 +#else
19316 +#define __KERNEL_TEXT_OFFSET 0
19317 +#endif
19318
19319 #undef i386 /* in case the preprocessor is a 32bit one */
19320
19321 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19322
19323 PHDRS {
19324 text PT_LOAD FLAGS(5); /* R_E */
19325 +#ifdef CONFIG_X86_32
19326 + module PT_LOAD FLAGS(5); /* R_E */
19327 +#endif
19328 +#ifdef CONFIG_XEN
19329 + rodata PT_LOAD FLAGS(5); /* R_E */
19330 +#else
19331 + rodata PT_LOAD FLAGS(4); /* R__ */
19332 +#endif
19333 data PT_LOAD FLAGS(6); /* RW_ */
19334 -#ifdef CONFIG_X86_64
19335 + init.begin PT_LOAD FLAGS(6); /* RW_ */
19336 #ifdef CONFIG_SMP
19337 percpu PT_LOAD FLAGS(6); /* RW_ */
19338 #endif
19339 + text.init PT_LOAD FLAGS(5); /* R_E */
19340 + text.exit PT_LOAD FLAGS(5); /* R_E */
19341 init PT_LOAD FLAGS(7); /* RWE */
19342 -#endif
19343 note PT_NOTE FLAGS(0); /* ___ */
19344 }
19345
19346 SECTIONS
19347 {
19348 #ifdef CONFIG_X86_32
19349 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
19350 - phys_startup_32 = startup_32 - LOAD_OFFSET;
19351 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
19352 #else
19353 - . = __START_KERNEL;
19354 - phys_startup_64 = startup_64 - LOAD_OFFSET;
19355 + . = __START_KERNEL;
19356 #endif
19357
19358 /* Text and read-only data */
19359 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
19360 - _text = .;
19361 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19362 /* bootstrapping code */
19363 +#ifdef CONFIG_X86_32
19364 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19365 +#else
19366 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19367 +#endif
19368 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19369 + _text = .;
19370 HEAD_TEXT
19371 #ifdef CONFIG_X86_32
19372 . = ALIGN(PAGE_SIZE);
19373 @@ -108,13 +128,47 @@ SECTIONS
19374 IRQENTRY_TEXT
19375 *(.fixup)
19376 *(.gnu.warning)
19377 - /* End of text section */
19378 - _etext = .;
19379 } :text = 0x9090
19380
19381 - NOTES :text :note
19382 + . += __KERNEL_TEXT_OFFSET;
19383
19384 - EXCEPTION_TABLE(16) :text = 0x9090
19385 +#ifdef CONFIG_X86_32
19386 + . = ALIGN(PAGE_SIZE);
19387 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
19388 +
19389 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
19390 + MODULES_EXEC_VADDR = .;
19391 + BYTE(0)
19392 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
19393 + . = ALIGN(HPAGE_SIZE);
19394 + MODULES_EXEC_END = . - 1;
19395 +#endif
19396 +
19397 + } :module
19398 +#endif
19399 +
19400 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
19401 + /* End of text section */
19402 + _etext = . - __KERNEL_TEXT_OFFSET;
19403 + }
19404 +
19405 +#ifdef CONFIG_X86_32
19406 + . = ALIGN(PAGE_SIZE);
19407 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
19408 + *(.idt)
19409 + . = ALIGN(PAGE_SIZE);
19410 + *(.empty_zero_page)
19411 + *(.initial_pg_fixmap)
19412 + *(.initial_pg_pmd)
19413 + *(.initial_page_table)
19414 + *(.swapper_pg_dir)
19415 + } :rodata
19416 +#endif
19417 +
19418 + . = ALIGN(PAGE_SIZE);
19419 + NOTES :rodata :note
19420 +
19421 + EXCEPTION_TABLE(16) :rodata
19422
19423 #if defined(CONFIG_DEBUG_RODATA)
19424 /* .text should occupy whole number of pages */
19425 @@ -126,16 +180,20 @@ SECTIONS
19426
19427 /* Data */
19428 .data : AT(ADDR(.data) - LOAD_OFFSET) {
19429 +
19430 +#ifdef CONFIG_PAX_KERNEXEC
19431 + . = ALIGN(HPAGE_SIZE);
19432 +#else
19433 + . = ALIGN(PAGE_SIZE);
19434 +#endif
19435 +
19436 /* Start of data section */
19437 _sdata = .;
19438
19439 /* init_task */
19440 INIT_TASK_DATA(THREAD_SIZE)
19441
19442 -#ifdef CONFIG_X86_32
19443 - /* 32 bit has nosave before _edata */
19444 NOSAVE_DATA
19445 -#endif
19446
19447 PAGE_ALIGNED_DATA(PAGE_SIZE)
19448
19449 @@ -176,12 +234,19 @@ SECTIONS
19450 #endif /* CONFIG_X86_64 */
19451
19452 /* Init code and data - will be freed after init */
19453 - . = ALIGN(PAGE_SIZE);
19454 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
19455 + BYTE(0)
19456 +
19457 +#ifdef CONFIG_PAX_KERNEXEC
19458 + . = ALIGN(HPAGE_SIZE);
19459 +#else
19460 + . = ALIGN(PAGE_SIZE);
19461 +#endif
19462 +
19463 __init_begin = .; /* paired with __init_end */
19464 - }
19465 + } :init.begin
19466
19467 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
19468 +#ifdef CONFIG_SMP
19469 /*
19470 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
19471 * output PHDR, so the next output section - .init.text - should
19472 @@ -190,12 +255,27 @@ SECTIONS
19473 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
19474 #endif
19475
19476 - INIT_TEXT_SECTION(PAGE_SIZE)
19477 -#ifdef CONFIG_X86_64
19478 - :init
19479 -#endif
19480 + . = ALIGN(PAGE_SIZE);
19481 + init_begin = .;
19482 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
19483 + VMLINUX_SYMBOL(_sinittext) = .;
19484 + INIT_TEXT
19485 + VMLINUX_SYMBOL(_einittext) = .;
19486 + . = ALIGN(PAGE_SIZE);
19487 + } :text.init
19488
19489 - INIT_DATA_SECTION(16)
19490 + /*
19491 + * .exit.text is discard at runtime, not link time, to deal with
19492 + * references from .altinstructions and .eh_frame
19493 + */
19494 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19495 + EXIT_TEXT
19496 + . = ALIGN(16);
19497 + } :text.exit
19498 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
19499 +
19500 + . = ALIGN(PAGE_SIZE);
19501 + INIT_DATA_SECTION(16) :init
19502
19503 /*
19504 * Code and data for a variety of lowlevel trampolines, to be
19505 @@ -269,19 +349,12 @@ SECTIONS
19506 }
19507
19508 . = ALIGN(8);
19509 - /*
19510 - * .exit.text is discard at runtime, not link time, to deal with
19511 - * references from .altinstructions and .eh_frame
19512 - */
19513 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
19514 - EXIT_TEXT
19515 - }
19516
19517 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
19518 EXIT_DATA
19519 }
19520
19521 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
19522 +#ifndef CONFIG_SMP
19523 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
19524 #endif
19525
19526 @@ -300,16 +373,10 @@ SECTIONS
19527 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
19528 __smp_locks = .;
19529 *(.smp_locks)
19530 - . = ALIGN(PAGE_SIZE);
19531 __smp_locks_end = .;
19532 + . = ALIGN(PAGE_SIZE);
19533 }
19534
19535 -#ifdef CONFIG_X86_64
19536 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
19537 - NOSAVE_DATA
19538 - }
19539 -#endif
19540 -
19541 /* BSS */
19542 . = ALIGN(PAGE_SIZE);
19543 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
19544 @@ -325,6 +392,7 @@ SECTIONS
19545 __brk_base = .;
19546 . += 64 * 1024; /* 64k alignment slop space */
19547 *(.brk_reservation) /* areas brk users have reserved */
19548 + . = ALIGN(HPAGE_SIZE);
19549 __brk_limit = .;
19550 }
19551
19552 @@ -351,13 +419,12 @@ SECTIONS
19553 * for the boot processor.
19554 */
19555 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
19556 -INIT_PER_CPU(gdt_page);
19557 INIT_PER_CPU(irq_stack_union);
19558
19559 /*
19560 * Build-time check on the image size:
19561 */
19562 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
19563 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
19564 "kernel image bigger than KERNEL_IMAGE_SIZE");
19565
19566 #ifdef CONFIG_SMP
19567 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
19568 index e4d4a22..47ee71f 100644
19569 --- a/arch/x86/kernel/vsyscall_64.c
19570 +++ b/arch/x86/kernel/vsyscall_64.c
19571 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
19572 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
19573 };
19574
19575 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
19576 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
19577
19578 static int __init vsyscall_setup(char *str)
19579 {
19580 if (str) {
19581 if (!strcmp("emulate", str))
19582 vsyscall_mode = EMULATE;
19583 - else if (!strcmp("native", str))
19584 - vsyscall_mode = NATIVE;
19585 else if (!strcmp("none", str))
19586 vsyscall_mode = NONE;
19587 else
19588 @@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19589
19590 tsk = current;
19591 if (seccomp_mode(&tsk->seccomp))
19592 - do_exit(SIGKILL);
19593 + do_group_exit(SIGKILL);
19594
19595 switch (vsyscall_nr) {
19596 case 0:
19597 @@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19598 return true;
19599
19600 sigsegv:
19601 - force_sig(SIGSEGV, current);
19602 - return true;
19603 + do_group_exit(SIGKILL);
19604 }
19605
19606 /*
19607 @@ -274,10 +271,7 @@ void __init map_vsyscall(void)
19608 extern char __vvar_page;
19609 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
19610
19611 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
19612 - vsyscall_mode == NATIVE
19613 - ? PAGE_KERNEL_VSYSCALL
19614 - : PAGE_KERNEL_VVAR);
19615 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
19616 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
19617 (unsigned long)VSYSCALL_START);
19618
19619 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
19620 index 9796c2f..f686fbf 100644
19621 --- a/arch/x86/kernel/x8664_ksyms_64.c
19622 +++ b/arch/x86/kernel/x8664_ksyms_64.c
19623 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
19624 EXPORT_SYMBOL(copy_user_generic_string);
19625 EXPORT_SYMBOL(copy_user_generic_unrolled);
19626 EXPORT_SYMBOL(__copy_user_nocache);
19627 -EXPORT_SYMBOL(_copy_from_user);
19628 -EXPORT_SYMBOL(_copy_to_user);
19629
19630 EXPORT_SYMBOL(copy_page);
19631 EXPORT_SYMBOL(clear_page);
19632 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
19633 index 7110911..e8cdee5 100644
19634 --- a/arch/x86/kernel/xsave.c
19635 +++ b/arch/x86/kernel/xsave.c
19636 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
19637 fx_sw_user->xstate_size > fx_sw_user->extended_size)
19638 return -EINVAL;
19639
19640 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
19641 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
19642 fx_sw_user->extended_size -
19643 FP_XSTATE_MAGIC2_SIZE));
19644 if (err)
19645 @@ -266,7 +266,7 @@ fx_only:
19646 * the other extended state.
19647 */
19648 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
19649 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
19650 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
19651 }
19652
19653 /*
19654 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
19655 if (use_xsave())
19656 err = restore_user_xstate(buf);
19657 else
19658 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
19659 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
19660 buf);
19661 if (unlikely(err)) {
19662 /*
19663 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
19664 index f1e3be1..588efc8 100644
19665 --- a/arch/x86/kvm/emulate.c
19666 +++ b/arch/x86/kvm/emulate.c
19667 @@ -249,6 +249,7 @@ struct gprefix {
19668
19669 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
19670 do { \
19671 + unsigned long _tmp; \
19672 __asm__ __volatile__ ( \
19673 _PRE_EFLAGS("0", "4", "2") \
19674 _op _suffix " %"_x"3,%1; " \
19675 @@ -263,8 +264,6 @@ struct gprefix {
19676 /* Raw emulation: instruction has two explicit operands. */
19677 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
19678 do { \
19679 - unsigned long _tmp; \
19680 - \
19681 switch ((ctxt)->dst.bytes) { \
19682 case 2: \
19683 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
19684 @@ -280,7 +279,6 @@ struct gprefix {
19685
19686 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
19687 do { \
19688 - unsigned long _tmp; \
19689 switch ((ctxt)->dst.bytes) { \
19690 case 1: \
19691 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
19692 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
19693 index 54abb40..a192606 100644
19694 --- a/arch/x86/kvm/lapic.c
19695 +++ b/arch/x86/kvm/lapic.c
19696 @@ -53,7 +53,7 @@
19697 #define APIC_BUS_CYCLE_NS 1
19698
19699 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
19700 -#define apic_debug(fmt, arg...)
19701 +#define apic_debug(fmt, arg...) do {} while (0)
19702
19703 #define APIC_LVT_NUM 6
19704 /* 14 is the version for Xeon and Pentium 8.4.8*/
19705 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
19706 index f1b36cf..af8a124 100644
19707 --- a/arch/x86/kvm/mmu.c
19708 +++ b/arch/x86/kvm/mmu.c
19709 @@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
19710
19711 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
19712
19713 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
19714 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
19715
19716 /*
19717 * Assume that the pte write on a page table of the same type
19718 @@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
19719 }
19720
19721 spin_lock(&vcpu->kvm->mmu_lock);
19722 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
19723 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
19724 gentry = 0;
19725 kvm_mmu_free_some_pages(vcpu);
19726 ++vcpu->kvm->stat.mmu_pte_write;
19727 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
19728 index 9299410..ade2f9b 100644
19729 --- a/arch/x86/kvm/paging_tmpl.h
19730 +++ b/arch/x86/kvm/paging_tmpl.h
19731 @@ -197,7 +197,7 @@ retry_walk:
19732 if (unlikely(kvm_is_error_hva(host_addr)))
19733 goto error;
19734
19735 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
19736 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
19737 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
19738 goto error;
19739
19740 @@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
19741 if (need_flush)
19742 kvm_flush_remote_tlbs(vcpu->kvm);
19743
19744 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
19745 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
19746
19747 spin_unlock(&vcpu->kvm->mmu_lock);
19748
19749 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
19750 index 94a4672..5c6b853 100644
19751 --- a/arch/x86/kvm/svm.c
19752 +++ b/arch/x86/kvm/svm.c
19753 @@ -3405,7 +3405,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
19754 int cpu = raw_smp_processor_id();
19755
19756 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
19757 +
19758 + pax_open_kernel();
19759 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
19760 + pax_close_kernel();
19761 +
19762 load_TR_desc();
19763 }
19764
19765 @@ -3783,6 +3787,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
19766 #endif
19767 #endif
19768
19769 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19770 + __set_fs(current_thread_info()->addr_limit);
19771 +#endif
19772 +
19773 reload_tss(vcpu);
19774
19775 local_irq_disable();
19776 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
19777 index 4ea7678..b3a7084 100644
19778 --- a/arch/x86/kvm/vmx.c
19779 +++ b/arch/x86/kvm/vmx.c
19780 @@ -1305,7 +1305,11 @@ static void reload_tss(void)
19781 struct desc_struct *descs;
19782
19783 descs = (void *)gdt->address;
19784 +
19785 + pax_open_kernel();
19786 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
19787 + pax_close_kernel();
19788 +
19789 load_TR_desc();
19790 }
19791
19792 @@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
19793 if (!cpu_has_vmx_flexpriority())
19794 flexpriority_enabled = 0;
19795
19796 - if (!cpu_has_vmx_tpr_shadow())
19797 - kvm_x86_ops->update_cr8_intercept = NULL;
19798 + if (!cpu_has_vmx_tpr_shadow()) {
19799 + pax_open_kernel();
19800 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
19801 + pax_close_kernel();
19802 + }
19803
19804 if (enable_ept && !cpu_has_vmx_ept_2m_page())
19805 kvm_disable_largepages();
19806 @@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
19807 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
19808
19809 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
19810 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
19811 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
19812
19813 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
19814 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
19815 @@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19816 "jmp .Lkvm_vmx_return \n\t"
19817 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
19818 ".Lkvm_vmx_return: "
19819 +
19820 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19821 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
19822 + ".Lkvm_vmx_return2: "
19823 +#endif
19824 +
19825 /* Save guest registers, load host registers, keep flags */
19826 "mov %0, %c[wordsize](%%"R"sp) \n\t"
19827 "pop %0 \n\t"
19828 @@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19829 #endif
19830 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
19831 [wordsize]"i"(sizeof(ulong))
19832 +
19833 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19834 + ,[cs]"i"(__KERNEL_CS)
19835 +#endif
19836 +
19837 : "cc", "memory"
19838 , R"ax", R"bx", R"di", R"si"
19839 #ifdef CONFIG_X86_64
19840 @@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
19841 }
19842 }
19843
19844 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
19845 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
19846 +
19847 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19848 + loadsegment(fs, __KERNEL_PERCPU);
19849 +#endif
19850 +
19851 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19852 + __set_fs(current_thread_info()->addr_limit);
19853 +#endif
19854 +
19855 vmx->loaded_vmcs->launched = 1;
19856
19857 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
19858 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
19859 index 4c938da..4ddef65 100644
19860 --- a/arch/x86/kvm/x86.c
19861 +++ b/arch/x86/kvm/x86.c
19862 @@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
19863 {
19864 struct kvm *kvm = vcpu->kvm;
19865 int lm = is_long_mode(vcpu);
19866 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19867 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19868 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
19869 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
19870 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
19871 : kvm->arch.xen_hvm_config.blob_size_32;
19872 u32 page_num = data & ~PAGE_MASK;
19873 @@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
19874 if (n < msr_list.nmsrs)
19875 goto out;
19876 r = -EFAULT;
19877 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
19878 + goto out;
19879 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
19880 num_msrs_to_save * sizeof(u32)))
19881 goto out;
19882 @@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19883 struct kvm_cpuid2 *cpuid,
19884 struct kvm_cpuid_entry2 __user *entries)
19885 {
19886 - int r;
19887 + int r, i;
19888
19889 r = -E2BIG;
19890 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
19891 goto out;
19892 r = -EFAULT;
19893 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
19894 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19895 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19896 goto out;
19897 + for (i = 0; i < cpuid->nent; ++i) {
19898 + struct kvm_cpuid_entry2 cpuid_entry;
19899 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
19900 + goto out;
19901 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
19902 + }
19903 vcpu->arch.cpuid_nent = cpuid->nent;
19904 kvm_apic_set_version(vcpu);
19905 kvm_x86_ops->cpuid_update(vcpu);
19906 @@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
19907 struct kvm_cpuid2 *cpuid,
19908 struct kvm_cpuid_entry2 __user *entries)
19909 {
19910 - int r;
19911 + int r, i;
19912
19913 r = -E2BIG;
19914 if (cpuid->nent < vcpu->arch.cpuid_nent)
19915 goto out;
19916 r = -EFAULT;
19917 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
19918 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19919 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
19920 goto out;
19921 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
19922 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
19923 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
19924 + goto out;
19925 + }
19926 return 0;
19927
19928 out:
19929 @@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
19930 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
19931 struct kvm_interrupt *irq)
19932 {
19933 - if (irq->irq < 0 || irq->irq >= 256)
19934 + if (irq->irq >= 256)
19935 return -EINVAL;
19936 if (irqchip_in_kernel(vcpu->kvm))
19937 return -ENXIO;
19938 @@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
19939 kvm_mmu_set_mmio_spte_mask(mask);
19940 }
19941
19942 -int kvm_arch_init(void *opaque)
19943 +int kvm_arch_init(const void *opaque)
19944 {
19945 int r;
19946 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
19947 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
19948 index cf4603b..7cdde38 100644
19949 --- a/arch/x86/lguest/boot.c
19950 +++ b/arch/x86/lguest/boot.c
19951 @@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
19952 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
19953 * Launcher to reboot us.
19954 */
19955 -static void lguest_restart(char *reason)
19956 +static __noreturn void lguest_restart(char *reason)
19957 {
19958 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
19959 + BUG();
19960 }
19961
19962 /*G:050
19963 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
19964 index 042f682..c92afb6 100644
19965 --- a/arch/x86/lib/atomic64_32.c
19966 +++ b/arch/x86/lib/atomic64_32.c
19967 @@ -8,18 +8,30 @@
19968
19969 long long atomic64_read_cx8(long long, const atomic64_t *v);
19970 EXPORT_SYMBOL(atomic64_read_cx8);
19971 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19972 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
19973 long long atomic64_set_cx8(long long, const atomic64_t *v);
19974 EXPORT_SYMBOL(atomic64_set_cx8);
19975 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
19976 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
19977 long long atomic64_xchg_cx8(long long, unsigned high);
19978 EXPORT_SYMBOL(atomic64_xchg_cx8);
19979 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
19980 EXPORT_SYMBOL(atomic64_add_return_cx8);
19981 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19982 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
19983 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
19984 EXPORT_SYMBOL(atomic64_sub_return_cx8);
19985 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19986 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
19987 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
19988 EXPORT_SYMBOL(atomic64_inc_return_cx8);
19989 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19990 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
19991 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
19992 EXPORT_SYMBOL(atomic64_dec_return_cx8);
19993 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
19994 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
19995 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
19996 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
19997 int atomic64_inc_not_zero_cx8(atomic64_t *v);
19998 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
19999 #ifndef CONFIG_X86_CMPXCHG64
20000 long long atomic64_read_386(long long, const atomic64_t *v);
20001 EXPORT_SYMBOL(atomic64_read_386);
20002 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
20003 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
20004 long long atomic64_set_386(long long, const atomic64_t *v);
20005 EXPORT_SYMBOL(atomic64_set_386);
20006 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
20007 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
20008 long long atomic64_xchg_386(long long, unsigned high);
20009 EXPORT_SYMBOL(atomic64_xchg_386);
20010 long long atomic64_add_return_386(long long a, atomic64_t *v);
20011 EXPORT_SYMBOL(atomic64_add_return_386);
20012 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20013 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
20014 long long atomic64_sub_return_386(long long a, atomic64_t *v);
20015 EXPORT_SYMBOL(atomic64_sub_return_386);
20016 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20017 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
20018 long long atomic64_inc_return_386(long long a, atomic64_t *v);
20019 EXPORT_SYMBOL(atomic64_inc_return_386);
20020 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20021 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
20022 long long atomic64_dec_return_386(long long a, atomic64_t *v);
20023 EXPORT_SYMBOL(atomic64_dec_return_386);
20024 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20025 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
20026 long long atomic64_add_386(long long a, atomic64_t *v);
20027 EXPORT_SYMBOL(atomic64_add_386);
20028 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
20029 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
20030 long long atomic64_sub_386(long long a, atomic64_t *v);
20031 EXPORT_SYMBOL(atomic64_sub_386);
20032 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
20033 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
20034 long long atomic64_inc_386(long long a, atomic64_t *v);
20035 EXPORT_SYMBOL(atomic64_inc_386);
20036 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
20037 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
20038 long long atomic64_dec_386(long long a, atomic64_t *v);
20039 EXPORT_SYMBOL(atomic64_dec_386);
20040 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
20041 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
20042 long long atomic64_dec_if_positive_386(atomic64_t *v);
20043 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20044 int atomic64_inc_not_zero_386(atomic64_t *v);
20045 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20046 index e8e7e0d..56fd1b0 100644
20047 --- a/arch/x86/lib/atomic64_386_32.S
20048 +++ b/arch/x86/lib/atomic64_386_32.S
20049 @@ -48,6 +48,10 @@ BEGIN(read)
20050 movl (v), %eax
20051 movl 4(v), %edx
20052 RET_ENDP
20053 +BEGIN(read_unchecked)
20054 + movl (v), %eax
20055 + movl 4(v), %edx
20056 +RET_ENDP
20057 #undef v
20058
20059 #define v %esi
20060 @@ -55,6 +59,10 @@ BEGIN(set)
20061 movl %ebx, (v)
20062 movl %ecx, 4(v)
20063 RET_ENDP
20064 +BEGIN(set_unchecked)
20065 + movl %ebx, (v)
20066 + movl %ecx, 4(v)
20067 +RET_ENDP
20068 #undef v
20069
20070 #define v %esi
20071 @@ -70,6 +78,20 @@ RET_ENDP
20072 BEGIN(add)
20073 addl %eax, (v)
20074 adcl %edx, 4(v)
20075 +
20076 +#ifdef CONFIG_PAX_REFCOUNT
20077 + jno 0f
20078 + subl %eax, (v)
20079 + sbbl %edx, 4(v)
20080 + int $4
20081 +0:
20082 + _ASM_EXTABLE(0b, 0b)
20083 +#endif
20084 +
20085 +RET_ENDP
20086 +BEGIN(add_unchecked)
20087 + addl %eax, (v)
20088 + adcl %edx, 4(v)
20089 RET_ENDP
20090 #undef v
20091
20092 @@ -77,6 +99,24 @@ RET_ENDP
20093 BEGIN(add_return)
20094 addl (v), %eax
20095 adcl 4(v), %edx
20096 +
20097 +#ifdef CONFIG_PAX_REFCOUNT
20098 + into
20099 +1234:
20100 + _ASM_EXTABLE(1234b, 2f)
20101 +#endif
20102 +
20103 + movl %eax, (v)
20104 + movl %edx, 4(v)
20105 +
20106 +#ifdef CONFIG_PAX_REFCOUNT
20107 +2:
20108 +#endif
20109 +
20110 +RET_ENDP
20111 +BEGIN(add_return_unchecked)
20112 + addl (v), %eax
20113 + adcl 4(v), %edx
20114 movl %eax, (v)
20115 movl %edx, 4(v)
20116 RET_ENDP
20117 @@ -86,6 +126,20 @@ RET_ENDP
20118 BEGIN(sub)
20119 subl %eax, (v)
20120 sbbl %edx, 4(v)
20121 +
20122 +#ifdef CONFIG_PAX_REFCOUNT
20123 + jno 0f
20124 + addl %eax, (v)
20125 + adcl %edx, 4(v)
20126 + int $4
20127 +0:
20128 + _ASM_EXTABLE(0b, 0b)
20129 +#endif
20130 +
20131 +RET_ENDP
20132 +BEGIN(sub_unchecked)
20133 + subl %eax, (v)
20134 + sbbl %edx, 4(v)
20135 RET_ENDP
20136 #undef v
20137
20138 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20139 sbbl $0, %edx
20140 addl (v), %eax
20141 adcl 4(v), %edx
20142 +
20143 +#ifdef CONFIG_PAX_REFCOUNT
20144 + into
20145 +1234:
20146 + _ASM_EXTABLE(1234b, 2f)
20147 +#endif
20148 +
20149 + movl %eax, (v)
20150 + movl %edx, 4(v)
20151 +
20152 +#ifdef CONFIG_PAX_REFCOUNT
20153 +2:
20154 +#endif
20155 +
20156 +RET_ENDP
20157 +BEGIN(sub_return_unchecked)
20158 + negl %edx
20159 + negl %eax
20160 + sbbl $0, %edx
20161 + addl (v), %eax
20162 + adcl 4(v), %edx
20163 movl %eax, (v)
20164 movl %edx, 4(v)
20165 RET_ENDP
20166 @@ -105,6 +180,20 @@ RET_ENDP
20167 BEGIN(inc)
20168 addl $1, (v)
20169 adcl $0, 4(v)
20170 +
20171 +#ifdef CONFIG_PAX_REFCOUNT
20172 + jno 0f
20173 + subl $1, (v)
20174 + sbbl $0, 4(v)
20175 + int $4
20176 +0:
20177 + _ASM_EXTABLE(0b, 0b)
20178 +#endif
20179 +
20180 +RET_ENDP
20181 +BEGIN(inc_unchecked)
20182 + addl $1, (v)
20183 + adcl $0, 4(v)
20184 RET_ENDP
20185 #undef v
20186
20187 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20188 movl 4(v), %edx
20189 addl $1, %eax
20190 adcl $0, %edx
20191 +
20192 +#ifdef CONFIG_PAX_REFCOUNT
20193 + into
20194 +1234:
20195 + _ASM_EXTABLE(1234b, 2f)
20196 +#endif
20197 +
20198 + movl %eax, (v)
20199 + movl %edx, 4(v)
20200 +
20201 +#ifdef CONFIG_PAX_REFCOUNT
20202 +2:
20203 +#endif
20204 +
20205 +RET_ENDP
20206 +BEGIN(inc_return_unchecked)
20207 + movl (v), %eax
20208 + movl 4(v), %edx
20209 + addl $1, %eax
20210 + adcl $0, %edx
20211 movl %eax, (v)
20212 movl %edx, 4(v)
20213 RET_ENDP
20214 @@ -123,6 +232,20 @@ RET_ENDP
20215 BEGIN(dec)
20216 subl $1, (v)
20217 sbbl $0, 4(v)
20218 +
20219 +#ifdef CONFIG_PAX_REFCOUNT
20220 + jno 0f
20221 + addl $1, (v)
20222 + adcl $0, 4(v)
20223 + int $4
20224 +0:
20225 + _ASM_EXTABLE(0b, 0b)
20226 +#endif
20227 +
20228 +RET_ENDP
20229 +BEGIN(dec_unchecked)
20230 + subl $1, (v)
20231 + sbbl $0, 4(v)
20232 RET_ENDP
20233 #undef v
20234
20235 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20236 movl 4(v), %edx
20237 subl $1, %eax
20238 sbbl $0, %edx
20239 +
20240 +#ifdef CONFIG_PAX_REFCOUNT
20241 + into
20242 +1234:
20243 + _ASM_EXTABLE(1234b, 2f)
20244 +#endif
20245 +
20246 + movl %eax, (v)
20247 + movl %edx, 4(v)
20248 +
20249 +#ifdef CONFIG_PAX_REFCOUNT
20250 +2:
20251 +#endif
20252 +
20253 +RET_ENDP
20254 +BEGIN(dec_return_unchecked)
20255 + movl (v), %eax
20256 + movl 4(v), %edx
20257 + subl $1, %eax
20258 + sbbl $0, %edx
20259 movl %eax, (v)
20260 movl %edx, 4(v)
20261 RET_ENDP
20262 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20263 adcl %edx, %edi
20264 addl (v), %eax
20265 adcl 4(v), %edx
20266 +
20267 +#ifdef CONFIG_PAX_REFCOUNT
20268 + into
20269 +1234:
20270 + _ASM_EXTABLE(1234b, 2f)
20271 +#endif
20272 +
20273 cmpl %eax, %esi
20274 je 3f
20275 1:
20276 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20277 1:
20278 addl $1, %eax
20279 adcl $0, %edx
20280 +
20281 +#ifdef CONFIG_PAX_REFCOUNT
20282 + into
20283 +1234:
20284 + _ASM_EXTABLE(1234b, 2f)
20285 +#endif
20286 +
20287 movl %eax, (v)
20288 movl %edx, 4(v)
20289 movl $1, %eax
20290 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20291 movl 4(v), %edx
20292 subl $1, %eax
20293 sbbl $0, %edx
20294 +
20295 +#ifdef CONFIG_PAX_REFCOUNT
20296 + into
20297 +1234:
20298 + _ASM_EXTABLE(1234b, 1f)
20299 +#endif
20300 +
20301 js 1f
20302 movl %eax, (v)
20303 movl %edx, 4(v)
20304 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20305 index 391a083..d658e9f 100644
20306 --- a/arch/x86/lib/atomic64_cx8_32.S
20307 +++ b/arch/x86/lib/atomic64_cx8_32.S
20308 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20309 CFI_STARTPROC
20310
20311 read64 %ecx
20312 + pax_force_retaddr
20313 ret
20314 CFI_ENDPROC
20315 ENDPROC(atomic64_read_cx8)
20316
20317 +ENTRY(atomic64_read_unchecked_cx8)
20318 + CFI_STARTPROC
20319 +
20320 + read64 %ecx
20321 + pax_force_retaddr
20322 + ret
20323 + CFI_ENDPROC
20324 +ENDPROC(atomic64_read_unchecked_cx8)
20325 +
20326 ENTRY(atomic64_set_cx8)
20327 CFI_STARTPROC
20328
20329 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20330 cmpxchg8b (%esi)
20331 jne 1b
20332
20333 + pax_force_retaddr
20334 ret
20335 CFI_ENDPROC
20336 ENDPROC(atomic64_set_cx8)
20337
20338 +ENTRY(atomic64_set_unchecked_cx8)
20339 + CFI_STARTPROC
20340 +
20341 +1:
20342 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20343 + * are atomic on 586 and newer */
20344 + cmpxchg8b (%esi)
20345 + jne 1b
20346 +
20347 + pax_force_retaddr
20348 + ret
20349 + CFI_ENDPROC
20350 +ENDPROC(atomic64_set_unchecked_cx8)
20351 +
20352 ENTRY(atomic64_xchg_cx8)
20353 CFI_STARTPROC
20354
20355 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
20356 cmpxchg8b (%esi)
20357 jne 1b
20358
20359 + pax_force_retaddr
20360 ret
20361 CFI_ENDPROC
20362 ENDPROC(atomic64_xchg_cx8)
20363
20364 -.macro addsub_return func ins insc
20365 -ENTRY(atomic64_\func\()_return_cx8)
20366 +.macro addsub_return func ins insc unchecked=""
20367 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20368 CFI_STARTPROC
20369 SAVE ebp
20370 SAVE ebx
20371 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20372 movl %edx, %ecx
20373 \ins\()l %esi, %ebx
20374 \insc\()l %edi, %ecx
20375 +
20376 +.ifb \unchecked
20377 +#ifdef CONFIG_PAX_REFCOUNT
20378 + into
20379 +2:
20380 + _ASM_EXTABLE(2b, 3f)
20381 +#endif
20382 +.endif
20383 +
20384 LOCK_PREFIX
20385 cmpxchg8b (%ebp)
20386 jne 1b
20387 -
20388 -10:
20389 movl %ebx, %eax
20390 movl %ecx, %edx
20391 +
20392 +.ifb \unchecked
20393 +#ifdef CONFIG_PAX_REFCOUNT
20394 +3:
20395 +#endif
20396 +.endif
20397 +
20398 RESTORE edi
20399 RESTORE esi
20400 RESTORE ebx
20401 RESTORE ebp
20402 + pax_force_retaddr
20403 ret
20404 CFI_ENDPROC
20405 -ENDPROC(atomic64_\func\()_return_cx8)
20406 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20407 .endm
20408
20409 addsub_return add add adc
20410 addsub_return sub sub sbb
20411 +addsub_return add add adc _unchecked
20412 +addsub_return sub sub sbb _unchecked
20413
20414 -.macro incdec_return func ins insc
20415 -ENTRY(atomic64_\func\()_return_cx8)
20416 +.macro incdec_return func ins insc unchecked
20417 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20418 CFI_STARTPROC
20419 SAVE ebx
20420
20421 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20422 movl %edx, %ecx
20423 \ins\()l $1, %ebx
20424 \insc\()l $0, %ecx
20425 +
20426 +.ifb \unchecked
20427 +#ifdef CONFIG_PAX_REFCOUNT
20428 + into
20429 +2:
20430 + _ASM_EXTABLE(2b, 3f)
20431 +#endif
20432 +.endif
20433 +
20434 LOCK_PREFIX
20435 cmpxchg8b (%esi)
20436 jne 1b
20437
20438 -10:
20439 movl %ebx, %eax
20440 movl %ecx, %edx
20441 +
20442 +.ifb \unchecked
20443 +#ifdef CONFIG_PAX_REFCOUNT
20444 +3:
20445 +#endif
20446 +.endif
20447 +
20448 RESTORE ebx
20449 + pax_force_retaddr
20450 ret
20451 CFI_ENDPROC
20452 -ENDPROC(atomic64_\func\()_return_cx8)
20453 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20454 .endm
20455
20456 incdec_return inc add adc
20457 incdec_return dec sub sbb
20458 +incdec_return inc add adc _unchecked
20459 +incdec_return dec sub sbb _unchecked
20460
20461 ENTRY(atomic64_dec_if_positive_cx8)
20462 CFI_STARTPROC
20463 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
20464 movl %edx, %ecx
20465 subl $1, %ebx
20466 sbb $0, %ecx
20467 +
20468 +#ifdef CONFIG_PAX_REFCOUNT
20469 + into
20470 +1234:
20471 + _ASM_EXTABLE(1234b, 2f)
20472 +#endif
20473 +
20474 js 2f
20475 LOCK_PREFIX
20476 cmpxchg8b (%esi)
20477 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
20478 movl %ebx, %eax
20479 movl %ecx, %edx
20480 RESTORE ebx
20481 + pax_force_retaddr
20482 ret
20483 CFI_ENDPROC
20484 ENDPROC(atomic64_dec_if_positive_cx8)
20485 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
20486 movl %edx, %ecx
20487 addl %esi, %ebx
20488 adcl %edi, %ecx
20489 +
20490 +#ifdef CONFIG_PAX_REFCOUNT
20491 + into
20492 +1234:
20493 + _ASM_EXTABLE(1234b, 3f)
20494 +#endif
20495 +
20496 LOCK_PREFIX
20497 cmpxchg8b (%ebp)
20498 jne 1b
20499 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
20500 CFI_ADJUST_CFA_OFFSET -8
20501 RESTORE ebx
20502 RESTORE ebp
20503 + pax_force_retaddr
20504 ret
20505 4:
20506 cmpl %edx, 4(%esp)
20507 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
20508 movl %edx, %ecx
20509 addl $1, %ebx
20510 adcl $0, %ecx
20511 +
20512 +#ifdef CONFIG_PAX_REFCOUNT
20513 + into
20514 +1234:
20515 + _ASM_EXTABLE(1234b, 3f)
20516 +#endif
20517 +
20518 LOCK_PREFIX
20519 cmpxchg8b (%esi)
20520 jne 1b
20521 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
20522 movl $1, %eax
20523 3:
20524 RESTORE ebx
20525 + pax_force_retaddr
20526 ret
20527 4:
20528 testl %edx, %edx
20529 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
20530 index 78d16a5..fbcf666 100644
20531 --- a/arch/x86/lib/checksum_32.S
20532 +++ b/arch/x86/lib/checksum_32.S
20533 @@ -28,7 +28,8 @@
20534 #include <linux/linkage.h>
20535 #include <asm/dwarf2.h>
20536 #include <asm/errno.h>
20537 -
20538 +#include <asm/segment.h>
20539 +
20540 /*
20541 * computes a partial checksum, e.g. for TCP/UDP fragments
20542 */
20543 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
20544
20545 #define ARGBASE 16
20546 #define FP 12
20547 -
20548 -ENTRY(csum_partial_copy_generic)
20549 +
20550 +ENTRY(csum_partial_copy_generic_to_user)
20551 CFI_STARTPROC
20552 +
20553 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20554 + pushl_cfi %gs
20555 + popl_cfi %es
20556 + jmp csum_partial_copy_generic
20557 +#endif
20558 +
20559 +ENTRY(csum_partial_copy_generic_from_user)
20560 +
20561 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20562 + pushl_cfi %gs
20563 + popl_cfi %ds
20564 +#endif
20565 +
20566 +ENTRY(csum_partial_copy_generic)
20567 subl $4,%esp
20568 CFI_ADJUST_CFA_OFFSET 4
20569 pushl_cfi %edi
20570 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
20571 jmp 4f
20572 SRC(1: movw (%esi), %bx )
20573 addl $2, %esi
20574 -DST( movw %bx, (%edi) )
20575 +DST( movw %bx, %es:(%edi) )
20576 addl $2, %edi
20577 addw %bx, %ax
20578 adcl $0, %eax
20579 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
20580 SRC(1: movl (%esi), %ebx )
20581 SRC( movl 4(%esi), %edx )
20582 adcl %ebx, %eax
20583 -DST( movl %ebx, (%edi) )
20584 +DST( movl %ebx, %es:(%edi) )
20585 adcl %edx, %eax
20586 -DST( movl %edx, 4(%edi) )
20587 +DST( movl %edx, %es:4(%edi) )
20588
20589 SRC( movl 8(%esi), %ebx )
20590 SRC( movl 12(%esi), %edx )
20591 adcl %ebx, %eax
20592 -DST( movl %ebx, 8(%edi) )
20593 +DST( movl %ebx, %es:8(%edi) )
20594 adcl %edx, %eax
20595 -DST( movl %edx, 12(%edi) )
20596 +DST( movl %edx, %es:12(%edi) )
20597
20598 SRC( movl 16(%esi), %ebx )
20599 SRC( movl 20(%esi), %edx )
20600 adcl %ebx, %eax
20601 -DST( movl %ebx, 16(%edi) )
20602 +DST( movl %ebx, %es:16(%edi) )
20603 adcl %edx, %eax
20604 -DST( movl %edx, 20(%edi) )
20605 +DST( movl %edx, %es:20(%edi) )
20606
20607 SRC( movl 24(%esi), %ebx )
20608 SRC( movl 28(%esi), %edx )
20609 adcl %ebx, %eax
20610 -DST( movl %ebx, 24(%edi) )
20611 +DST( movl %ebx, %es:24(%edi) )
20612 adcl %edx, %eax
20613 -DST( movl %edx, 28(%edi) )
20614 +DST( movl %edx, %es:28(%edi) )
20615
20616 lea 32(%esi), %esi
20617 lea 32(%edi), %edi
20618 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
20619 shrl $2, %edx # This clears CF
20620 SRC(3: movl (%esi), %ebx )
20621 adcl %ebx, %eax
20622 -DST( movl %ebx, (%edi) )
20623 +DST( movl %ebx, %es:(%edi) )
20624 lea 4(%esi), %esi
20625 lea 4(%edi), %edi
20626 dec %edx
20627 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
20628 jb 5f
20629 SRC( movw (%esi), %cx )
20630 leal 2(%esi), %esi
20631 -DST( movw %cx, (%edi) )
20632 +DST( movw %cx, %es:(%edi) )
20633 leal 2(%edi), %edi
20634 je 6f
20635 shll $16,%ecx
20636 SRC(5: movb (%esi), %cl )
20637 -DST( movb %cl, (%edi) )
20638 +DST( movb %cl, %es:(%edi) )
20639 6: addl %ecx, %eax
20640 adcl $0, %eax
20641 7:
20642 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
20643
20644 6001:
20645 movl ARGBASE+20(%esp), %ebx # src_err_ptr
20646 - movl $-EFAULT, (%ebx)
20647 + movl $-EFAULT, %ss:(%ebx)
20648
20649 # zero the complete destination - computing the rest
20650 # is too much work
20651 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
20652
20653 6002:
20654 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20655 - movl $-EFAULT,(%ebx)
20656 + movl $-EFAULT,%ss:(%ebx)
20657 jmp 5000b
20658
20659 .previous
20660
20661 + pushl_cfi %ss
20662 + popl_cfi %ds
20663 + pushl_cfi %ss
20664 + popl_cfi %es
20665 popl_cfi %ebx
20666 CFI_RESTORE ebx
20667 popl_cfi %esi
20668 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
20669 popl_cfi %ecx # equivalent to addl $4,%esp
20670 ret
20671 CFI_ENDPROC
20672 -ENDPROC(csum_partial_copy_generic)
20673 +ENDPROC(csum_partial_copy_generic_to_user)
20674
20675 #else
20676
20677 /* Version for PentiumII/PPro */
20678
20679 #define ROUND1(x) \
20680 + nop; nop; nop; \
20681 SRC(movl x(%esi), %ebx ) ; \
20682 addl %ebx, %eax ; \
20683 - DST(movl %ebx, x(%edi) ) ;
20684 + DST(movl %ebx, %es:x(%edi)) ;
20685
20686 #define ROUND(x) \
20687 + nop; nop; nop; \
20688 SRC(movl x(%esi), %ebx ) ; \
20689 adcl %ebx, %eax ; \
20690 - DST(movl %ebx, x(%edi) ) ;
20691 + DST(movl %ebx, %es:x(%edi)) ;
20692
20693 #define ARGBASE 12
20694 -
20695 -ENTRY(csum_partial_copy_generic)
20696 +
20697 +ENTRY(csum_partial_copy_generic_to_user)
20698 CFI_STARTPROC
20699 +
20700 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20701 + pushl_cfi %gs
20702 + popl_cfi %es
20703 + jmp csum_partial_copy_generic
20704 +#endif
20705 +
20706 +ENTRY(csum_partial_copy_generic_from_user)
20707 +
20708 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20709 + pushl_cfi %gs
20710 + popl_cfi %ds
20711 +#endif
20712 +
20713 +ENTRY(csum_partial_copy_generic)
20714 pushl_cfi %ebx
20715 CFI_REL_OFFSET ebx, 0
20716 pushl_cfi %edi
20717 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
20718 subl %ebx, %edi
20719 lea -1(%esi),%edx
20720 andl $-32,%edx
20721 - lea 3f(%ebx,%ebx), %ebx
20722 + lea 3f(%ebx,%ebx,2), %ebx
20723 testl %esi, %esi
20724 jmp *%ebx
20725 1: addl $64,%esi
20726 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
20727 jb 5f
20728 SRC( movw (%esi), %dx )
20729 leal 2(%esi), %esi
20730 -DST( movw %dx, (%edi) )
20731 +DST( movw %dx, %es:(%edi) )
20732 leal 2(%edi), %edi
20733 je 6f
20734 shll $16,%edx
20735 5:
20736 SRC( movb (%esi), %dl )
20737 -DST( movb %dl, (%edi) )
20738 +DST( movb %dl, %es:(%edi) )
20739 6: addl %edx, %eax
20740 adcl $0, %eax
20741 7:
20742 .section .fixup, "ax"
20743 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
20744 - movl $-EFAULT, (%ebx)
20745 + movl $-EFAULT, %ss:(%ebx)
20746 # zero the complete destination (computing the rest is too much work)
20747 movl ARGBASE+8(%esp),%edi # dst
20748 movl ARGBASE+12(%esp),%ecx # len
20749 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
20750 rep; stosb
20751 jmp 7b
20752 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
20753 - movl $-EFAULT, (%ebx)
20754 + movl $-EFAULT, %ss:(%ebx)
20755 jmp 7b
20756 .previous
20757
20758 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20759 + pushl_cfi %ss
20760 + popl_cfi %ds
20761 + pushl_cfi %ss
20762 + popl_cfi %es
20763 +#endif
20764 +
20765 popl_cfi %esi
20766 CFI_RESTORE esi
20767 popl_cfi %edi
20768 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
20769 CFI_RESTORE ebx
20770 ret
20771 CFI_ENDPROC
20772 -ENDPROC(csum_partial_copy_generic)
20773 +ENDPROC(csum_partial_copy_generic_to_user)
20774
20775 #undef ROUND
20776 #undef ROUND1
20777 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
20778 index f2145cf..cea889d 100644
20779 --- a/arch/x86/lib/clear_page_64.S
20780 +++ b/arch/x86/lib/clear_page_64.S
20781 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
20782 movl $4096/8,%ecx
20783 xorl %eax,%eax
20784 rep stosq
20785 + pax_force_retaddr
20786 ret
20787 CFI_ENDPROC
20788 ENDPROC(clear_page_c)
20789 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
20790 movl $4096,%ecx
20791 xorl %eax,%eax
20792 rep stosb
20793 + pax_force_retaddr
20794 ret
20795 CFI_ENDPROC
20796 ENDPROC(clear_page_c_e)
20797 @@ -43,6 +45,7 @@ ENTRY(clear_page)
20798 leaq 64(%rdi),%rdi
20799 jnz .Lloop
20800 nop
20801 + pax_force_retaddr
20802 ret
20803 CFI_ENDPROC
20804 .Lclear_page_end:
20805 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
20806
20807 #include <asm/cpufeature.h>
20808
20809 - .section .altinstr_replacement,"ax"
20810 + .section .altinstr_replacement,"a"
20811 1: .byte 0xeb /* jmp <disp8> */
20812 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
20813 2: .byte 0xeb /* jmp <disp8> */
20814 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
20815 index 1e572c5..2a162cd 100644
20816 --- a/arch/x86/lib/cmpxchg16b_emu.S
20817 +++ b/arch/x86/lib/cmpxchg16b_emu.S
20818 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
20819
20820 popf
20821 mov $1, %al
20822 + pax_force_retaddr
20823 ret
20824
20825 not_same:
20826 popf
20827 xor %al,%al
20828 + pax_force_retaddr
20829 ret
20830
20831 CFI_ENDPROC
20832 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
20833 index 01c805b..dccb07f 100644
20834 --- a/arch/x86/lib/copy_page_64.S
20835 +++ b/arch/x86/lib/copy_page_64.S
20836 @@ -9,6 +9,7 @@ copy_page_c:
20837 CFI_STARTPROC
20838 movl $4096/8,%ecx
20839 rep movsq
20840 + pax_force_retaddr
20841 ret
20842 CFI_ENDPROC
20843 ENDPROC(copy_page_c)
20844 @@ -39,7 +40,7 @@ ENTRY(copy_page)
20845 movq 16 (%rsi), %rdx
20846 movq 24 (%rsi), %r8
20847 movq 32 (%rsi), %r9
20848 - movq 40 (%rsi), %r10
20849 + movq 40 (%rsi), %r13
20850 movq 48 (%rsi), %r11
20851 movq 56 (%rsi), %r12
20852
20853 @@ -50,7 +51,7 @@ ENTRY(copy_page)
20854 movq %rdx, 16 (%rdi)
20855 movq %r8, 24 (%rdi)
20856 movq %r9, 32 (%rdi)
20857 - movq %r10, 40 (%rdi)
20858 + movq %r13, 40 (%rdi)
20859 movq %r11, 48 (%rdi)
20860 movq %r12, 56 (%rdi)
20861
20862 @@ -69,7 +70,7 @@ ENTRY(copy_page)
20863 movq 16 (%rsi), %rdx
20864 movq 24 (%rsi), %r8
20865 movq 32 (%rsi), %r9
20866 - movq 40 (%rsi), %r10
20867 + movq 40 (%rsi), %r13
20868 movq 48 (%rsi), %r11
20869 movq 56 (%rsi), %r12
20870
20871 @@ -78,7 +79,7 @@ ENTRY(copy_page)
20872 movq %rdx, 16 (%rdi)
20873 movq %r8, 24 (%rdi)
20874 movq %r9, 32 (%rdi)
20875 - movq %r10, 40 (%rdi)
20876 + movq %r13, 40 (%rdi)
20877 movq %r11, 48 (%rdi)
20878 movq %r12, 56 (%rdi)
20879
20880 @@ -95,6 +96,7 @@ ENTRY(copy_page)
20881 CFI_RESTORE r13
20882 addq $3*8,%rsp
20883 CFI_ADJUST_CFA_OFFSET -3*8
20884 + pax_force_retaddr
20885 ret
20886 .Lcopy_page_end:
20887 CFI_ENDPROC
20888 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
20889
20890 #include <asm/cpufeature.h>
20891
20892 - .section .altinstr_replacement,"ax"
20893 + .section .altinstr_replacement,"a"
20894 1: .byte 0xeb /* jmp <disp8> */
20895 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
20896 2:
20897 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
20898 index 0248402..821c786 100644
20899 --- a/arch/x86/lib/copy_user_64.S
20900 +++ b/arch/x86/lib/copy_user_64.S
20901 @@ -16,6 +16,7 @@
20902 #include <asm/thread_info.h>
20903 #include <asm/cpufeature.h>
20904 #include <asm/alternative-asm.h>
20905 +#include <asm/pgtable.h>
20906
20907 /*
20908 * By placing feature2 after feature1 in altinstructions section, we logically
20909 @@ -29,7 +30,7 @@
20910 .byte 0xe9 /* 32bit jump */
20911 .long \orig-1f /* by default jump to orig */
20912 1:
20913 - .section .altinstr_replacement,"ax"
20914 + .section .altinstr_replacement,"a"
20915 2: .byte 0xe9 /* near jump with 32bit immediate */
20916 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
20917 3: .byte 0xe9 /* near jump with 32bit immediate */
20918 @@ -71,47 +72,20 @@
20919 #endif
20920 .endm
20921
20922 -/* Standard copy_to_user with segment limit checking */
20923 -ENTRY(_copy_to_user)
20924 - CFI_STARTPROC
20925 - GET_THREAD_INFO(%rax)
20926 - movq %rdi,%rcx
20927 - addq %rdx,%rcx
20928 - jc bad_to_user
20929 - cmpq TI_addr_limit(%rax),%rcx
20930 - ja bad_to_user
20931 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20932 - copy_user_generic_unrolled,copy_user_generic_string, \
20933 - copy_user_enhanced_fast_string
20934 - CFI_ENDPROC
20935 -ENDPROC(_copy_to_user)
20936 -
20937 -/* Standard copy_from_user with segment limit checking */
20938 -ENTRY(_copy_from_user)
20939 - CFI_STARTPROC
20940 - GET_THREAD_INFO(%rax)
20941 - movq %rsi,%rcx
20942 - addq %rdx,%rcx
20943 - jc bad_from_user
20944 - cmpq TI_addr_limit(%rax),%rcx
20945 - ja bad_from_user
20946 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
20947 - copy_user_generic_unrolled,copy_user_generic_string, \
20948 - copy_user_enhanced_fast_string
20949 - CFI_ENDPROC
20950 -ENDPROC(_copy_from_user)
20951 -
20952 .section .fixup,"ax"
20953 /* must zero dest */
20954 ENTRY(bad_from_user)
20955 bad_from_user:
20956 CFI_STARTPROC
20957 + testl %edx,%edx
20958 + js bad_to_user
20959 movl %edx,%ecx
20960 xorl %eax,%eax
20961 rep
20962 stosb
20963 bad_to_user:
20964 movl %edx,%eax
20965 + pax_force_retaddr
20966 ret
20967 CFI_ENDPROC
20968 ENDPROC(bad_from_user)
20969 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
20970 jz 17f
20971 1: movq (%rsi),%r8
20972 2: movq 1*8(%rsi),%r9
20973 -3: movq 2*8(%rsi),%r10
20974 +3: movq 2*8(%rsi),%rax
20975 4: movq 3*8(%rsi),%r11
20976 5: movq %r8,(%rdi)
20977 6: movq %r9,1*8(%rdi)
20978 -7: movq %r10,2*8(%rdi)
20979 +7: movq %rax,2*8(%rdi)
20980 8: movq %r11,3*8(%rdi)
20981 9: movq 4*8(%rsi),%r8
20982 10: movq 5*8(%rsi),%r9
20983 -11: movq 6*8(%rsi),%r10
20984 +11: movq 6*8(%rsi),%rax
20985 12: movq 7*8(%rsi),%r11
20986 13: movq %r8,4*8(%rdi)
20987 14: movq %r9,5*8(%rdi)
20988 -15: movq %r10,6*8(%rdi)
20989 +15: movq %rax,6*8(%rdi)
20990 16: movq %r11,7*8(%rdi)
20991 leaq 64(%rsi),%rsi
20992 leaq 64(%rdi),%rdi
20993 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
20994 decl %ecx
20995 jnz 21b
20996 23: xor %eax,%eax
20997 + pax_force_retaddr
20998 ret
20999
21000 .section .fixup,"ax"
21001 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21002 3: rep
21003 movsb
21004 4: xorl %eax,%eax
21005 + pax_force_retaddr
21006 ret
21007
21008 .section .fixup,"ax"
21009 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21010 1: rep
21011 movsb
21012 2: xorl %eax,%eax
21013 + pax_force_retaddr
21014 ret
21015
21016 .section .fixup,"ax"
21017 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21018 index cb0c112..e3a6895 100644
21019 --- a/arch/x86/lib/copy_user_nocache_64.S
21020 +++ b/arch/x86/lib/copy_user_nocache_64.S
21021 @@ -8,12 +8,14 @@
21022
21023 #include <linux/linkage.h>
21024 #include <asm/dwarf2.h>
21025 +#include <asm/alternative-asm.h>
21026
21027 #define FIX_ALIGNMENT 1
21028
21029 #include <asm/current.h>
21030 #include <asm/asm-offsets.h>
21031 #include <asm/thread_info.h>
21032 +#include <asm/pgtable.h>
21033
21034 .macro ALIGN_DESTINATION
21035 #ifdef FIX_ALIGNMENT
21036 @@ -50,6 +52,15 @@
21037 */
21038 ENTRY(__copy_user_nocache)
21039 CFI_STARTPROC
21040 +
21041 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21042 + mov $PAX_USER_SHADOW_BASE,%rcx
21043 + cmp %rcx,%rsi
21044 + jae 1f
21045 + add %rcx,%rsi
21046 +1:
21047 +#endif
21048 +
21049 cmpl $8,%edx
21050 jb 20f /* less then 8 bytes, go to byte copy loop */
21051 ALIGN_DESTINATION
21052 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21053 jz 17f
21054 1: movq (%rsi),%r8
21055 2: movq 1*8(%rsi),%r9
21056 -3: movq 2*8(%rsi),%r10
21057 +3: movq 2*8(%rsi),%rax
21058 4: movq 3*8(%rsi),%r11
21059 5: movnti %r8,(%rdi)
21060 6: movnti %r9,1*8(%rdi)
21061 -7: movnti %r10,2*8(%rdi)
21062 +7: movnti %rax,2*8(%rdi)
21063 8: movnti %r11,3*8(%rdi)
21064 9: movq 4*8(%rsi),%r8
21065 10: movq 5*8(%rsi),%r9
21066 -11: movq 6*8(%rsi),%r10
21067 +11: movq 6*8(%rsi),%rax
21068 12: movq 7*8(%rsi),%r11
21069 13: movnti %r8,4*8(%rdi)
21070 14: movnti %r9,5*8(%rdi)
21071 -15: movnti %r10,6*8(%rdi)
21072 +15: movnti %rax,6*8(%rdi)
21073 16: movnti %r11,7*8(%rdi)
21074 leaq 64(%rsi),%rsi
21075 leaq 64(%rdi),%rdi
21076 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21077 jnz 21b
21078 23: xorl %eax,%eax
21079 sfence
21080 + pax_force_retaddr
21081 ret
21082
21083 .section .fixup,"ax"
21084 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21085 index fb903b7..c92b7f7 100644
21086 --- a/arch/x86/lib/csum-copy_64.S
21087 +++ b/arch/x86/lib/csum-copy_64.S
21088 @@ -8,6 +8,7 @@
21089 #include <linux/linkage.h>
21090 #include <asm/dwarf2.h>
21091 #include <asm/errno.h>
21092 +#include <asm/alternative-asm.h>
21093
21094 /*
21095 * Checksum copy with exception handling.
21096 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21097 CFI_RESTORE rbp
21098 addq $7*8, %rsp
21099 CFI_ADJUST_CFA_OFFSET -7*8
21100 + pax_force_retaddr 0, 1
21101 ret
21102 CFI_RESTORE_STATE
21103
21104 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21105 index 459b58a..9570bc7 100644
21106 --- a/arch/x86/lib/csum-wrappers_64.c
21107 +++ b/arch/x86/lib/csum-wrappers_64.c
21108 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21109 len -= 2;
21110 }
21111 }
21112 - isum = csum_partial_copy_generic((__force const void *)src,
21113 +
21114 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21115 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21116 + src += PAX_USER_SHADOW_BASE;
21117 +#endif
21118 +
21119 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21120 dst, len, isum, errp, NULL);
21121 if (unlikely(*errp))
21122 goto out_err;
21123 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21124 }
21125
21126 *errp = 0;
21127 - return csum_partial_copy_generic(src, (void __force *)dst,
21128 +
21129 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21130 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21131 + dst += PAX_USER_SHADOW_BASE;
21132 +#endif
21133 +
21134 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21135 len, isum, NULL, errp);
21136 }
21137 EXPORT_SYMBOL(csum_partial_copy_to_user);
21138 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21139 index 51f1504..ddac4c1 100644
21140 --- a/arch/x86/lib/getuser.S
21141 +++ b/arch/x86/lib/getuser.S
21142 @@ -33,15 +33,38 @@
21143 #include <asm/asm-offsets.h>
21144 #include <asm/thread_info.h>
21145 #include <asm/asm.h>
21146 +#include <asm/segment.h>
21147 +#include <asm/pgtable.h>
21148 +#include <asm/alternative-asm.h>
21149 +
21150 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21151 +#define __copyuser_seg gs;
21152 +#else
21153 +#define __copyuser_seg
21154 +#endif
21155
21156 .text
21157 ENTRY(__get_user_1)
21158 CFI_STARTPROC
21159 +
21160 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21161 GET_THREAD_INFO(%_ASM_DX)
21162 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21163 jae bad_get_user
21164 -1: movzb (%_ASM_AX),%edx
21165 +
21166 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21167 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21168 + cmp %_ASM_DX,%_ASM_AX
21169 + jae 1234f
21170 + add %_ASM_DX,%_ASM_AX
21171 +1234:
21172 +#endif
21173 +
21174 +#endif
21175 +
21176 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21177 xor %eax,%eax
21178 + pax_force_retaddr
21179 ret
21180 CFI_ENDPROC
21181 ENDPROC(__get_user_1)
21182 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21183 ENTRY(__get_user_2)
21184 CFI_STARTPROC
21185 add $1,%_ASM_AX
21186 +
21187 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21188 jc bad_get_user
21189 GET_THREAD_INFO(%_ASM_DX)
21190 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21191 jae bad_get_user
21192 -2: movzwl -1(%_ASM_AX),%edx
21193 +
21194 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21195 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21196 + cmp %_ASM_DX,%_ASM_AX
21197 + jae 1234f
21198 + add %_ASM_DX,%_ASM_AX
21199 +1234:
21200 +#endif
21201 +
21202 +#endif
21203 +
21204 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21205 xor %eax,%eax
21206 + pax_force_retaddr
21207 ret
21208 CFI_ENDPROC
21209 ENDPROC(__get_user_2)
21210 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21211 ENTRY(__get_user_4)
21212 CFI_STARTPROC
21213 add $3,%_ASM_AX
21214 +
21215 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21216 jc bad_get_user
21217 GET_THREAD_INFO(%_ASM_DX)
21218 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21219 jae bad_get_user
21220 -3: mov -3(%_ASM_AX),%edx
21221 +
21222 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21223 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21224 + cmp %_ASM_DX,%_ASM_AX
21225 + jae 1234f
21226 + add %_ASM_DX,%_ASM_AX
21227 +1234:
21228 +#endif
21229 +
21230 +#endif
21231 +
21232 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21233 xor %eax,%eax
21234 + pax_force_retaddr
21235 ret
21236 CFI_ENDPROC
21237 ENDPROC(__get_user_4)
21238 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21239 GET_THREAD_INFO(%_ASM_DX)
21240 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21241 jae bad_get_user
21242 +
21243 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21244 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21245 + cmp %_ASM_DX,%_ASM_AX
21246 + jae 1234f
21247 + add %_ASM_DX,%_ASM_AX
21248 +1234:
21249 +#endif
21250 +
21251 4: movq -7(%_ASM_AX),%_ASM_DX
21252 xor %eax,%eax
21253 + pax_force_retaddr
21254 ret
21255 CFI_ENDPROC
21256 ENDPROC(__get_user_8)
21257 @@ -91,6 +152,7 @@ bad_get_user:
21258 CFI_STARTPROC
21259 xor %edx,%edx
21260 mov $(-EFAULT),%_ASM_AX
21261 + pax_force_retaddr
21262 ret
21263 CFI_ENDPROC
21264 END(bad_get_user)
21265 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21266 index 374562e..a75830b 100644
21267 --- a/arch/x86/lib/insn.c
21268 +++ b/arch/x86/lib/insn.c
21269 @@ -21,6 +21,11 @@
21270 #include <linux/string.h>
21271 #include <asm/inat.h>
21272 #include <asm/insn.h>
21273 +#ifdef __KERNEL__
21274 +#include <asm/pgtable_types.h>
21275 +#else
21276 +#define ktla_ktva(addr) addr
21277 +#endif
21278
21279 /* Verify next sizeof(t) bytes can be on the same instruction */
21280 #define validate_next(t, insn, n) \
21281 @@ -49,8 +54,8 @@
21282 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21283 {
21284 memset(insn, 0, sizeof(*insn));
21285 - insn->kaddr = kaddr;
21286 - insn->next_byte = kaddr;
21287 + insn->kaddr = ktla_ktva(kaddr);
21288 + insn->next_byte = ktla_ktva(kaddr);
21289 insn->x86_64 = x86_64 ? 1 : 0;
21290 insn->opnd_bytes = 4;
21291 if (x86_64)
21292 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21293 index 05a95e7..326f2fa 100644
21294 --- a/arch/x86/lib/iomap_copy_64.S
21295 +++ b/arch/x86/lib/iomap_copy_64.S
21296 @@ -17,6 +17,7 @@
21297
21298 #include <linux/linkage.h>
21299 #include <asm/dwarf2.h>
21300 +#include <asm/alternative-asm.h>
21301
21302 /*
21303 * override generic version in lib/iomap_copy.c
21304 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21305 CFI_STARTPROC
21306 movl %edx,%ecx
21307 rep movsd
21308 + pax_force_retaddr
21309 ret
21310 CFI_ENDPROC
21311 ENDPROC(__iowrite32_copy)
21312 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21313 index efbf2a0..8893637 100644
21314 --- a/arch/x86/lib/memcpy_64.S
21315 +++ b/arch/x86/lib/memcpy_64.S
21316 @@ -34,6 +34,7 @@
21317 rep movsq
21318 movl %edx, %ecx
21319 rep movsb
21320 + pax_force_retaddr
21321 ret
21322 .Lmemcpy_e:
21323 .previous
21324 @@ -51,6 +52,7 @@
21325
21326 movl %edx, %ecx
21327 rep movsb
21328 + pax_force_retaddr
21329 ret
21330 .Lmemcpy_e_e:
21331 .previous
21332 @@ -81,13 +83,13 @@ ENTRY(memcpy)
21333 */
21334 movq 0*8(%rsi), %r8
21335 movq 1*8(%rsi), %r9
21336 - movq 2*8(%rsi), %r10
21337 + movq 2*8(%rsi), %rcx
21338 movq 3*8(%rsi), %r11
21339 leaq 4*8(%rsi), %rsi
21340
21341 movq %r8, 0*8(%rdi)
21342 movq %r9, 1*8(%rdi)
21343 - movq %r10, 2*8(%rdi)
21344 + movq %rcx, 2*8(%rdi)
21345 movq %r11, 3*8(%rdi)
21346 leaq 4*8(%rdi), %rdi
21347 jae .Lcopy_forward_loop
21348 @@ -110,12 +112,12 @@ ENTRY(memcpy)
21349 subq $0x20, %rdx
21350 movq -1*8(%rsi), %r8
21351 movq -2*8(%rsi), %r9
21352 - movq -3*8(%rsi), %r10
21353 + movq -3*8(%rsi), %rcx
21354 movq -4*8(%rsi), %r11
21355 leaq -4*8(%rsi), %rsi
21356 movq %r8, -1*8(%rdi)
21357 movq %r9, -2*8(%rdi)
21358 - movq %r10, -3*8(%rdi)
21359 + movq %rcx, -3*8(%rdi)
21360 movq %r11, -4*8(%rdi)
21361 leaq -4*8(%rdi), %rdi
21362 jae .Lcopy_backward_loop
21363 @@ -135,12 +137,13 @@ ENTRY(memcpy)
21364 */
21365 movq 0*8(%rsi), %r8
21366 movq 1*8(%rsi), %r9
21367 - movq -2*8(%rsi, %rdx), %r10
21368 + movq -2*8(%rsi, %rdx), %rcx
21369 movq -1*8(%rsi, %rdx), %r11
21370 movq %r8, 0*8(%rdi)
21371 movq %r9, 1*8(%rdi)
21372 - movq %r10, -2*8(%rdi, %rdx)
21373 + movq %rcx, -2*8(%rdi, %rdx)
21374 movq %r11, -1*8(%rdi, %rdx)
21375 + pax_force_retaddr
21376 retq
21377 .p2align 4
21378 .Lless_16bytes:
21379 @@ -153,6 +156,7 @@ ENTRY(memcpy)
21380 movq -1*8(%rsi, %rdx), %r9
21381 movq %r8, 0*8(%rdi)
21382 movq %r9, -1*8(%rdi, %rdx)
21383 + pax_force_retaddr
21384 retq
21385 .p2align 4
21386 .Lless_8bytes:
21387 @@ -166,6 +170,7 @@ ENTRY(memcpy)
21388 movl -4(%rsi, %rdx), %r8d
21389 movl %ecx, (%rdi)
21390 movl %r8d, -4(%rdi, %rdx)
21391 + pax_force_retaddr
21392 retq
21393 .p2align 4
21394 .Lless_3bytes:
21395 @@ -183,6 +188,7 @@ ENTRY(memcpy)
21396 jnz .Lloop_1
21397
21398 .Lend:
21399 + pax_force_retaddr
21400 retq
21401 CFI_ENDPROC
21402 ENDPROC(memcpy)
21403 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21404 index ee16461..c39c199 100644
21405 --- a/arch/x86/lib/memmove_64.S
21406 +++ b/arch/x86/lib/memmove_64.S
21407 @@ -61,13 +61,13 @@ ENTRY(memmove)
21408 5:
21409 sub $0x20, %rdx
21410 movq 0*8(%rsi), %r11
21411 - movq 1*8(%rsi), %r10
21412 + movq 1*8(%rsi), %rcx
21413 movq 2*8(%rsi), %r9
21414 movq 3*8(%rsi), %r8
21415 leaq 4*8(%rsi), %rsi
21416
21417 movq %r11, 0*8(%rdi)
21418 - movq %r10, 1*8(%rdi)
21419 + movq %rcx, 1*8(%rdi)
21420 movq %r9, 2*8(%rdi)
21421 movq %r8, 3*8(%rdi)
21422 leaq 4*8(%rdi), %rdi
21423 @@ -81,10 +81,10 @@ ENTRY(memmove)
21424 4:
21425 movq %rdx, %rcx
21426 movq -8(%rsi, %rdx), %r11
21427 - lea -8(%rdi, %rdx), %r10
21428 + lea -8(%rdi, %rdx), %r9
21429 shrq $3, %rcx
21430 rep movsq
21431 - movq %r11, (%r10)
21432 + movq %r11, (%r9)
21433 jmp 13f
21434 .Lmemmove_end_forward:
21435
21436 @@ -95,14 +95,14 @@ ENTRY(memmove)
21437 7:
21438 movq %rdx, %rcx
21439 movq (%rsi), %r11
21440 - movq %rdi, %r10
21441 + movq %rdi, %r9
21442 leaq -8(%rsi, %rdx), %rsi
21443 leaq -8(%rdi, %rdx), %rdi
21444 shrq $3, %rcx
21445 std
21446 rep movsq
21447 cld
21448 - movq %r11, (%r10)
21449 + movq %r11, (%r9)
21450 jmp 13f
21451
21452 /*
21453 @@ -127,13 +127,13 @@ ENTRY(memmove)
21454 8:
21455 subq $0x20, %rdx
21456 movq -1*8(%rsi), %r11
21457 - movq -2*8(%rsi), %r10
21458 + movq -2*8(%rsi), %rcx
21459 movq -3*8(%rsi), %r9
21460 movq -4*8(%rsi), %r8
21461 leaq -4*8(%rsi), %rsi
21462
21463 movq %r11, -1*8(%rdi)
21464 - movq %r10, -2*8(%rdi)
21465 + movq %rcx, -2*8(%rdi)
21466 movq %r9, -3*8(%rdi)
21467 movq %r8, -4*8(%rdi)
21468 leaq -4*8(%rdi), %rdi
21469 @@ -151,11 +151,11 @@ ENTRY(memmove)
21470 * Move data from 16 bytes to 31 bytes.
21471 */
21472 movq 0*8(%rsi), %r11
21473 - movq 1*8(%rsi), %r10
21474 + movq 1*8(%rsi), %rcx
21475 movq -2*8(%rsi, %rdx), %r9
21476 movq -1*8(%rsi, %rdx), %r8
21477 movq %r11, 0*8(%rdi)
21478 - movq %r10, 1*8(%rdi)
21479 + movq %rcx, 1*8(%rdi)
21480 movq %r9, -2*8(%rdi, %rdx)
21481 movq %r8, -1*8(%rdi, %rdx)
21482 jmp 13f
21483 @@ -167,9 +167,9 @@ ENTRY(memmove)
21484 * Move data from 8 bytes to 15 bytes.
21485 */
21486 movq 0*8(%rsi), %r11
21487 - movq -1*8(%rsi, %rdx), %r10
21488 + movq -1*8(%rsi, %rdx), %r9
21489 movq %r11, 0*8(%rdi)
21490 - movq %r10, -1*8(%rdi, %rdx)
21491 + movq %r9, -1*8(%rdi, %rdx)
21492 jmp 13f
21493 10:
21494 cmpq $4, %rdx
21495 @@ -178,9 +178,9 @@ ENTRY(memmove)
21496 * Move data from 4 bytes to 7 bytes.
21497 */
21498 movl (%rsi), %r11d
21499 - movl -4(%rsi, %rdx), %r10d
21500 + movl -4(%rsi, %rdx), %r9d
21501 movl %r11d, (%rdi)
21502 - movl %r10d, -4(%rdi, %rdx)
21503 + movl %r9d, -4(%rdi, %rdx)
21504 jmp 13f
21505 11:
21506 cmp $2, %rdx
21507 @@ -189,9 +189,9 @@ ENTRY(memmove)
21508 * Move data from 2 bytes to 3 bytes.
21509 */
21510 movw (%rsi), %r11w
21511 - movw -2(%rsi, %rdx), %r10w
21512 + movw -2(%rsi, %rdx), %r9w
21513 movw %r11w, (%rdi)
21514 - movw %r10w, -2(%rdi, %rdx)
21515 + movw %r9w, -2(%rdi, %rdx)
21516 jmp 13f
21517 12:
21518 cmp $1, %rdx
21519 @@ -202,6 +202,7 @@ ENTRY(memmove)
21520 movb (%rsi), %r11b
21521 movb %r11b, (%rdi)
21522 13:
21523 + pax_force_retaddr
21524 retq
21525 CFI_ENDPROC
21526
21527 @@ -210,6 +211,7 @@ ENTRY(memmove)
21528 /* Forward moving data. */
21529 movq %rdx, %rcx
21530 rep movsb
21531 + pax_force_retaddr
21532 retq
21533 .Lmemmove_end_forward_efs:
21534 .previous
21535 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
21536 index 79bd454..dff325a 100644
21537 --- a/arch/x86/lib/memset_64.S
21538 +++ b/arch/x86/lib/memset_64.S
21539 @@ -31,6 +31,7 @@
21540 movl %r8d,%ecx
21541 rep stosb
21542 movq %r9,%rax
21543 + pax_force_retaddr
21544 ret
21545 .Lmemset_e:
21546 .previous
21547 @@ -53,6 +54,7 @@
21548 movl %edx,%ecx
21549 rep stosb
21550 movq %r9,%rax
21551 + pax_force_retaddr
21552 ret
21553 .Lmemset_e_e:
21554 .previous
21555 @@ -60,13 +62,13 @@
21556 ENTRY(memset)
21557 ENTRY(__memset)
21558 CFI_STARTPROC
21559 - movq %rdi,%r10
21560 movq %rdx,%r11
21561
21562 /* expand byte value */
21563 movzbl %sil,%ecx
21564 movabs $0x0101010101010101,%rax
21565 mul %rcx /* with rax, clobbers rdx */
21566 + movq %rdi,%rdx
21567
21568 /* align dst */
21569 movl %edi,%r9d
21570 @@ -120,7 +122,8 @@ ENTRY(__memset)
21571 jnz .Lloop_1
21572
21573 .Lende:
21574 - movq %r10,%rax
21575 + movq %rdx,%rax
21576 + pax_force_retaddr
21577 ret
21578
21579 CFI_RESTORE_STATE
21580 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
21581 index c9f2d9b..e7fd2c0 100644
21582 --- a/arch/x86/lib/mmx_32.c
21583 +++ b/arch/x86/lib/mmx_32.c
21584 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21585 {
21586 void *p;
21587 int i;
21588 + unsigned long cr0;
21589
21590 if (unlikely(in_interrupt()))
21591 return __memcpy(to, from, len);
21592 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21593 kernel_fpu_begin();
21594
21595 __asm__ __volatile__ (
21596 - "1: prefetch (%0)\n" /* This set is 28 bytes */
21597 - " prefetch 64(%0)\n"
21598 - " prefetch 128(%0)\n"
21599 - " prefetch 192(%0)\n"
21600 - " prefetch 256(%0)\n"
21601 + "1: prefetch (%1)\n" /* This set is 28 bytes */
21602 + " prefetch 64(%1)\n"
21603 + " prefetch 128(%1)\n"
21604 + " prefetch 192(%1)\n"
21605 + " prefetch 256(%1)\n"
21606 "2: \n"
21607 ".section .fixup, \"ax\"\n"
21608 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21609 + "3: \n"
21610 +
21611 +#ifdef CONFIG_PAX_KERNEXEC
21612 + " movl %%cr0, %0\n"
21613 + " movl %0, %%eax\n"
21614 + " andl $0xFFFEFFFF, %%eax\n"
21615 + " movl %%eax, %%cr0\n"
21616 +#endif
21617 +
21618 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21619 +
21620 +#ifdef CONFIG_PAX_KERNEXEC
21621 + " movl %0, %%cr0\n"
21622 +#endif
21623 +
21624 " jmp 2b\n"
21625 ".previous\n"
21626 _ASM_EXTABLE(1b, 3b)
21627 - : : "r" (from));
21628 + : "=&r" (cr0) : "r" (from) : "ax");
21629
21630 for ( ; i > 5; i--) {
21631 __asm__ __volatile__ (
21632 - "1: prefetch 320(%0)\n"
21633 - "2: movq (%0), %%mm0\n"
21634 - " movq 8(%0), %%mm1\n"
21635 - " movq 16(%0), %%mm2\n"
21636 - " movq 24(%0), %%mm3\n"
21637 - " movq %%mm0, (%1)\n"
21638 - " movq %%mm1, 8(%1)\n"
21639 - " movq %%mm2, 16(%1)\n"
21640 - " movq %%mm3, 24(%1)\n"
21641 - " movq 32(%0), %%mm0\n"
21642 - " movq 40(%0), %%mm1\n"
21643 - " movq 48(%0), %%mm2\n"
21644 - " movq 56(%0), %%mm3\n"
21645 - " movq %%mm0, 32(%1)\n"
21646 - " movq %%mm1, 40(%1)\n"
21647 - " movq %%mm2, 48(%1)\n"
21648 - " movq %%mm3, 56(%1)\n"
21649 + "1: prefetch 320(%1)\n"
21650 + "2: movq (%1), %%mm0\n"
21651 + " movq 8(%1), %%mm1\n"
21652 + " movq 16(%1), %%mm2\n"
21653 + " movq 24(%1), %%mm3\n"
21654 + " movq %%mm0, (%2)\n"
21655 + " movq %%mm1, 8(%2)\n"
21656 + " movq %%mm2, 16(%2)\n"
21657 + " movq %%mm3, 24(%2)\n"
21658 + " movq 32(%1), %%mm0\n"
21659 + " movq 40(%1), %%mm1\n"
21660 + " movq 48(%1), %%mm2\n"
21661 + " movq 56(%1), %%mm3\n"
21662 + " movq %%mm0, 32(%2)\n"
21663 + " movq %%mm1, 40(%2)\n"
21664 + " movq %%mm2, 48(%2)\n"
21665 + " movq %%mm3, 56(%2)\n"
21666 ".section .fixup, \"ax\"\n"
21667 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21668 + "3:\n"
21669 +
21670 +#ifdef CONFIG_PAX_KERNEXEC
21671 + " movl %%cr0, %0\n"
21672 + " movl %0, %%eax\n"
21673 + " andl $0xFFFEFFFF, %%eax\n"
21674 + " movl %%eax, %%cr0\n"
21675 +#endif
21676 +
21677 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21678 +
21679 +#ifdef CONFIG_PAX_KERNEXEC
21680 + " movl %0, %%cr0\n"
21681 +#endif
21682 +
21683 " jmp 2b\n"
21684 ".previous\n"
21685 _ASM_EXTABLE(1b, 3b)
21686 - : : "r" (from), "r" (to) : "memory");
21687 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21688
21689 from += 64;
21690 to += 64;
21691 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
21692 static void fast_copy_page(void *to, void *from)
21693 {
21694 int i;
21695 + unsigned long cr0;
21696
21697 kernel_fpu_begin();
21698
21699 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
21700 * but that is for later. -AV
21701 */
21702 __asm__ __volatile__(
21703 - "1: prefetch (%0)\n"
21704 - " prefetch 64(%0)\n"
21705 - " prefetch 128(%0)\n"
21706 - " prefetch 192(%0)\n"
21707 - " prefetch 256(%0)\n"
21708 + "1: prefetch (%1)\n"
21709 + " prefetch 64(%1)\n"
21710 + " prefetch 128(%1)\n"
21711 + " prefetch 192(%1)\n"
21712 + " prefetch 256(%1)\n"
21713 "2: \n"
21714 ".section .fixup, \"ax\"\n"
21715 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21716 + "3: \n"
21717 +
21718 +#ifdef CONFIG_PAX_KERNEXEC
21719 + " movl %%cr0, %0\n"
21720 + " movl %0, %%eax\n"
21721 + " andl $0xFFFEFFFF, %%eax\n"
21722 + " movl %%eax, %%cr0\n"
21723 +#endif
21724 +
21725 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21726 +
21727 +#ifdef CONFIG_PAX_KERNEXEC
21728 + " movl %0, %%cr0\n"
21729 +#endif
21730 +
21731 " jmp 2b\n"
21732 ".previous\n"
21733 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
21734 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21735
21736 for (i = 0; i < (4096-320)/64; i++) {
21737 __asm__ __volatile__ (
21738 - "1: prefetch 320(%0)\n"
21739 - "2: movq (%0), %%mm0\n"
21740 - " movntq %%mm0, (%1)\n"
21741 - " movq 8(%0), %%mm1\n"
21742 - " movntq %%mm1, 8(%1)\n"
21743 - " movq 16(%0), %%mm2\n"
21744 - " movntq %%mm2, 16(%1)\n"
21745 - " movq 24(%0), %%mm3\n"
21746 - " movntq %%mm3, 24(%1)\n"
21747 - " movq 32(%0), %%mm4\n"
21748 - " movntq %%mm4, 32(%1)\n"
21749 - " movq 40(%0), %%mm5\n"
21750 - " movntq %%mm5, 40(%1)\n"
21751 - " movq 48(%0), %%mm6\n"
21752 - " movntq %%mm6, 48(%1)\n"
21753 - " movq 56(%0), %%mm7\n"
21754 - " movntq %%mm7, 56(%1)\n"
21755 + "1: prefetch 320(%1)\n"
21756 + "2: movq (%1), %%mm0\n"
21757 + " movntq %%mm0, (%2)\n"
21758 + " movq 8(%1), %%mm1\n"
21759 + " movntq %%mm1, 8(%2)\n"
21760 + " movq 16(%1), %%mm2\n"
21761 + " movntq %%mm2, 16(%2)\n"
21762 + " movq 24(%1), %%mm3\n"
21763 + " movntq %%mm3, 24(%2)\n"
21764 + " movq 32(%1), %%mm4\n"
21765 + " movntq %%mm4, 32(%2)\n"
21766 + " movq 40(%1), %%mm5\n"
21767 + " movntq %%mm5, 40(%2)\n"
21768 + " movq 48(%1), %%mm6\n"
21769 + " movntq %%mm6, 48(%2)\n"
21770 + " movq 56(%1), %%mm7\n"
21771 + " movntq %%mm7, 56(%2)\n"
21772 ".section .fixup, \"ax\"\n"
21773 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21774 + "3:\n"
21775 +
21776 +#ifdef CONFIG_PAX_KERNEXEC
21777 + " movl %%cr0, %0\n"
21778 + " movl %0, %%eax\n"
21779 + " andl $0xFFFEFFFF, %%eax\n"
21780 + " movl %%eax, %%cr0\n"
21781 +#endif
21782 +
21783 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21784 +
21785 +#ifdef CONFIG_PAX_KERNEXEC
21786 + " movl %0, %%cr0\n"
21787 +#endif
21788 +
21789 " jmp 2b\n"
21790 ".previous\n"
21791 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
21792 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21793
21794 from += 64;
21795 to += 64;
21796 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
21797 static void fast_copy_page(void *to, void *from)
21798 {
21799 int i;
21800 + unsigned long cr0;
21801
21802 kernel_fpu_begin();
21803
21804 __asm__ __volatile__ (
21805 - "1: prefetch (%0)\n"
21806 - " prefetch 64(%0)\n"
21807 - " prefetch 128(%0)\n"
21808 - " prefetch 192(%0)\n"
21809 - " prefetch 256(%0)\n"
21810 + "1: prefetch (%1)\n"
21811 + " prefetch 64(%1)\n"
21812 + " prefetch 128(%1)\n"
21813 + " prefetch 192(%1)\n"
21814 + " prefetch 256(%1)\n"
21815 "2: \n"
21816 ".section .fixup, \"ax\"\n"
21817 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21818 + "3: \n"
21819 +
21820 +#ifdef CONFIG_PAX_KERNEXEC
21821 + " movl %%cr0, %0\n"
21822 + " movl %0, %%eax\n"
21823 + " andl $0xFFFEFFFF, %%eax\n"
21824 + " movl %%eax, %%cr0\n"
21825 +#endif
21826 +
21827 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21828 +
21829 +#ifdef CONFIG_PAX_KERNEXEC
21830 + " movl %0, %%cr0\n"
21831 +#endif
21832 +
21833 " jmp 2b\n"
21834 ".previous\n"
21835 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
21836 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
21837
21838 for (i = 0; i < 4096/64; i++) {
21839 __asm__ __volatile__ (
21840 - "1: prefetch 320(%0)\n"
21841 - "2: movq (%0), %%mm0\n"
21842 - " movq 8(%0), %%mm1\n"
21843 - " movq 16(%0), %%mm2\n"
21844 - " movq 24(%0), %%mm3\n"
21845 - " movq %%mm0, (%1)\n"
21846 - " movq %%mm1, 8(%1)\n"
21847 - " movq %%mm2, 16(%1)\n"
21848 - " movq %%mm3, 24(%1)\n"
21849 - " movq 32(%0), %%mm0\n"
21850 - " movq 40(%0), %%mm1\n"
21851 - " movq 48(%0), %%mm2\n"
21852 - " movq 56(%0), %%mm3\n"
21853 - " movq %%mm0, 32(%1)\n"
21854 - " movq %%mm1, 40(%1)\n"
21855 - " movq %%mm2, 48(%1)\n"
21856 - " movq %%mm3, 56(%1)\n"
21857 + "1: prefetch 320(%1)\n"
21858 + "2: movq (%1), %%mm0\n"
21859 + " movq 8(%1), %%mm1\n"
21860 + " movq 16(%1), %%mm2\n"
21861 + " movq 24(%1), %%mm3\n"
21862 + " movq %%mm0, (%2)\n"
21863 + " movq %%mm1, 8(%2)\n"
21864 + " movq %%mm2, 16(%2)\n"
21865 + " movq %%mm3, 24(%2)\n"
21866 + " movq 32(%1), %%mm0\n"
21867 + " movq 40(%1), %%mm1\n"
21868 + " movq 48(%1), %%mm2\n"
21869 + " movq 56(%1), %%mm3\n"
21870 + " movq %%mm0, 32(%2)\n"
21871 + " movq %%mm1, 40(%2)\n"
21872 + " movq %%mm2, 48(%2)\n"
21873 + " movq %%mm3, 56(%2)\n"
21874 ".section .fixup, \"ax\"\n"
21875 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21876 + "3:\n"
21877 +
21878 +#ifdef CONFIG_PAX_KERNEXEC
21879 + " movl %%cr0, %0\n"
21880 + " movl %0, %%eax\n"
21881 + " andl $0xFFFEFFFF, %%eax\n"
21882 + " movl %%eax, %%cr0\n"
21883 +#endif
21884 +
21885 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
21886 +
21887 +#ifdef CONFIG_PAX_KERNEXEC
21888 + " movl %0, %%cr0\n"
21889 +#endif
21890 +
21891 " jmp 2b\n"
21892 ".previous\n"
21893 _ASM_EXTABLE(1b, 3b)
21894 - : : "r" (from), "r" (to) : "memory");
21895 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
21896
21897 from += 64;
21898 to += 64;
21899 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
21900 index 69fa106..adda88b 100644
21901 --- a/arch/x86/lib/msr-reg.S
21902 +++ b/arch/x86/lib/msr-reg.S
21903 @@ -3,6 +3,7 @@
21904 #include <asm/dwarf2.h>
21905 #include <asm/asm.h>
21906 #include <asm/msr.h>
21907 +#include <asm/alternative-asm.h>
21908
21909 #ifdef CONFIG_X86_64
21910 /*
21911 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
21912 CFI_STARTPROC
21913 pushq_cfi %rbx
21914 pushq_cfi %rbp
21915 - movq %rdi, %r10 /* Save pointer */
21916 + movq %rdi, %r9 /* Save pointer */
21917 xorl %r11d, %r11d /* Return value */
21918 movl (%rdi), %eax
21919 movl 4(%rdi), %ecx
21920 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
21921 movl 28(%rdi), %edi
21922 CFI_REMEMBER_STATE
21923 1: \op
21924 -2: movl %eax, (%r10)
21925 +2: movl %eax, (%r9)
21926 movl %r11d, %eax /* Return value */
21927 - movl %ecx, 4(%r10)
21928 - movl %edx, 8(%r10)
21929 - movl %ebx, 12(%r10)
21930 - movl %ebp, 20(%r10)
21931 - movl %esi, 24(%r10)
21932 - movl %edi, 28(%r10)
21933 + movl %ecx, 4(%r9)
21934 + movl %edx, 8(%r9)
21935 + movl %ebx, 12(%r9)
21936 + movl %ebp, 20(%r9)
21937 + movl %esi, 24(%r9)
21938 + movl %edi, 28(%r9)
21939 popq_cfi %rbp
21940 popq_cfi %rbx
21941 + pax_force_retaddr
21942 ret
21943 3:
21944 CFI_RESTORE_STATE
21945 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
21946 index 36b0d15..d381858 100644
21947 --- a/arch/x86/lib/putuser.S
21948 +++ b/arch/x86/lib/putuser.S
21949 @@ -15,7 +15,9 @@
21950 #include <asm/thread_info.h>
21951 #include <asm/errno.h>
21952 #include <asm/asm.h>
21953 -
21954 +#include <asm/segment.h>
21955 +#include <asm/pgtable.h>
21956 +#include <asm/alternative-asm.h>
21957
21958 /*
21959 * __put_user_X
21960 @@ -29,52 +31,119 @@
21961 * as they get called from within inline assembly.
21962 */
21963
21964 -#define ENTER CFI_STARTPROC ; \
21965 - GET_THREAD_INFO(%_ASM_BX)
21966 -#define EXIT ret ; \
21967 +#define ENTER CFI_STARTPROC
21968 +#define EXIT pax_force_retaddr; ret ; \
21969 CFI_ENDPROC
21970
21971 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21972 +#define _DEST %_ASM_CX,%_ASM_BX
21973 +#else
21974 +#define _DEST %_ASM_CX
21975 +#endif
21976 +
21977 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21978 +#define __copyuser_seg gs;
21979 +#else
21980 +#define __copyuser_seg
21981 +#endif
21982 +
21983 .text
21984 ENTRY(__put_user_1)
21985 ENTER
21986 +
21987 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21988 + GET_THREAD_INFO(%_ASM_BX)
21989 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
21990 jae bad_put_user
21991 -1: movb %al,(%_ASM_CX)
21992 +
21993 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21994 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
21995 + cmp %_ASM_BX,%_ASM_CX
21996 + jb 1234f
21997 + xor %ebx,%ebx
21998 +1234:
21999 +#endif
22000 +
22001 +#endif
22002 +
22003 +1: __copyuser_seg movb %al,(_DEST)
22004 xor %eax,%eax
22005 EXIT
22006 ENDPROC(__put_user_1)
22007
22008 ENTRY(__put_user_2)
22009 ENTER
22010 +
22011 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22012 + GET_THREAD_INFO(%_ASM_BX)
22013 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22014 sub $1,%_ASM_BX
22015 cmp %_ASM_BX,%_ASM_CX
22016 jae bad_put_user
22017 -2: movw %ax,(%_ASM_CX)
22018 +
22019 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22020 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22021 + cmp %_ASM_BX,%_ASM_CX
22022 + jb 1234f
22023 + xor %ebx,%ebx
22024 +1234:
22025 +#endif
22026 +
22027 +#endif
22028 +
22029 +2: __copyuser_seg movw %ax,(_DEST)
22030 xor %eax,%eax
22031 EXIT
22032 ENDPROC(__put_user_2)
22033
22034 ENTRY(__put_user_4)
22035 ENTER
22036 +
22037 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22038 + GET_THREAD_INFO(%_ASM_BX)
22039 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22040 sub $3,%_ASM_BX
22041 cmp %_ASM_BX,%_ASM_CX
22042 jae bad_put_user
22043 -3: movl %eax,(%_ASM_CX)
22044 +
22045 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22046 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22047 + cmp %_ASM_BX,%_ASM_CX
22048 + jb 1234f
22049 + xor %ebx,%ebx
22050 +1234:
22051 +#endif
22052 +
22053 +#endif
22054 +
22055 +3: __copyuser_seg movl %eax,(_DEST)
22056 xor %eax,%eax
22057 EXIT
22058 ENDPROC(__put_user_4)
22059
22060 ENTRY(__put_user_8)
22061 ENTER
22062 +
22063 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22064 + GET_THREAD_INFO(%_ASM_BX)
22065 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22066 sub $7,%_ASM_BX
22067 cmp %_ASM_BX,%_ASM_CX
22068 jae bad_put_user
22069 -4: mov %_ASM_AX,(%_ASM_CX)
22070 +
22071 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22072 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22073 + cmp %_ASM_BX,%_ASM_CX
22074 + jb 1234f
22075 + xor %ebx,%ebx
22076 +1234:
22077 +#endif
22078 +
22079 +#endif
22080 +
22081 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22082 #ifdef CONFIG_X86_32
22083 -5: movl %edx,4(%_ASM_CX)
22084 +5: __copyuser_seg movl %edx,4(_DEST)
22085 #endif
22086 xor %eax,%eax
22087 EXIT
22088 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22089 index 1cad221..de671ee 100644
22090 --- a/arch/x86/lib/rwlock.S
22091 +++ b/arch/x86/lib/rwlock.S
22092 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22093 FRAME
22094 0: LOCK_PREFIX
22095 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22096 +
22097 +#ifdef CONFIG_PAX_REFCOUNT
22098 + jno 1234f
22099 + LOCK_PREFIX
22100 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22101 + int $4
22102 +1234:
22103 + _ASM_EXTABLE(1234b, 1234b)
22104 +#endif
22105 +
22106 1: rep; nop
22107 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22108 jne 1b
22109 LOCK_PREFIX
22110 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22111 +
22112 +#ifdef CONFIG_PAX_REFCOUNT
22113 + jno 1234f
22114 + LOCK_PREFIX
22115 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22116 + int $4
22117 +1234:
22118 + _ASM_EXTABLE(1234b, 1234b)
22119 +#endif
22120 +
22121 jnz 0b
22122 ENDFRAME
22123 + pax_force_retaddr
22124 ret
22125 CFI_ENDPROC
22126 END(__write_lock_failed)
22127 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22128 FRAME
22129 0: LOCK_PREFIX
22130 READ_LOCK_SIZE(inc) (%__lock_ptr)
22131 +
22132 +#ifdef CONFIG_PAX_REFCOUNT
22133 + jno 1234f
22134 + LOCK_PREFIX
22135 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22136 + int $4
22137 +1234:
22138 + _ASM_EXTABLE(1234b, 1234b)
22139 +#endif
22140 +
22141 1: rep; nop
22142 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22143 js 1b
22144 LOCK_PREFIX
22145 READ_LOCK_SIZE(dec) (%__lock_ptr)
22146 +
22147 +#ifdef CONFIG_PAX_REFCOUNT
22148 + jno 1234f
22149 + LOCK_PREFIX
22150 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22151 + int $4
22152 +1234:
22153 + _ASM_EXTABLE(1234b, 1234b)
22154 +#endif
22155 +
22156 js 0b
22157 ENDFRAME
22158 + pax_force_retaddr
22159 ret
22160 CFI_ENDPROC
22161 END(__read_lock_failed)
22162 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22163 index 5dff5f0..cadebf4 100644
22164 --- a/arch/x86/lib/rwsem.S
22165 +++ b/arch/x86/lib/rwsem.S
22166 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22167 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22168 CFI_RESTORE __ASM_REG(dx)
22169 restore_common_regs
22170 + pax_force_retaddr
22171 ret
22172 CFI_ENDPROC
22173 ENDPROC(call_rwsem_down_read_failed)
22174 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22175 movq %rax,%rdi
22176 call rwsem_down_write_failed
22177 restore_common_regs
22178 + pax_force_retaddr
22179 ret
22180 CFI_ENDPROC
22181 ENDPROC(call_rwsem_down_write_failed)
22182 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22183 movq %rax,%rdi
22184 call rwsem_wake
22185 restore_common_regs
22186 -1: ret
22187 +1: pax_force_retaddr
22188 + ret
22189 CFI_ENDPROC
22190 ENDPROC(call_rwsem_wake)
22191
22192 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22193 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22194 CFI_RESTORE __ASM_REG(dx)
22195 restore_common_regs
22196 + pax_force_retaddr
22197 ret
22198 CFI_ENDPROC
22199 ENDPROC(call_rwsem_downgrade_wake)
22200 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22201 index a63efd6..ccecad8 100644
22202 --- a/arch/x86/lib/thunk_64.S
22203 +++ b/arch/x86/lib/thunk_64.S
22204 @@ -8,6 +8,7 @@
22205 #include <linux/linkage.h>
22206 #include <asm/dwarf2.h>
22207 #include <asm/calling.h>
22208 +#include <asm/alternative-asm.h>
22209
22210 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22211 .macro THUNK name, func, put_ret_addr_in_rdi=0
22212 @@ -41,5 +42,6 @@
22213 SAVE_ARGS
22214 restore:
22215 RESTORE_ARGS
22216 + pax_force_retaddr
22217 ret
22218 CFI_ENDPROC
22219 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22220 index e218d5d..35679b4 100644
22221 --- a/arch/x86/lib/usercopy_32.c
22222 +++ b/arch/x86/lib/usercopy_32.c
22223 @@ -43,7 +43,7 @@ do { \
22224 __asm__ __volatile__( \
22225 " testl %1,%1\n" \
22226 " jz 2f\n" \
22227 - "0: lodsb\n" \
22228 + "0: "__copyuser_seg"lodsb\n" \
22229 " stosb\n" \
22230 " testb %%al,%%al\n" \
22231 " jz 1f\n" \
22232 @@ -128,10 +128,12 @@ do { \
22233 int __d0; \
22234 might_fault(); \
22235 __asm__ __volatile__( \
22236 + __COPYUSER_SET_ES \
22237 "0: rep; stosl\n" \
22238 " movl %2,%0\n" \
22239 "1: rep; stosb\n" \
22240 "2:\n" \
22241 + __COPYUSER_RESTORE_ES \
22242 ".section .fixup,\"ax\"\n" \
22243 "3: lea 0(%2,%0,4),%0\n" \
22244 " jmp 2b\n" \
22245 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22246 might_fault();
22247
22248 __asm__ __volatile__(
22249 + __COPYUSER_SET_ES
22250 " testl %0, %0\n"
22251 " jz 3f\n"
22252 " andl %0,%%ecx\n"
22253 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22254 " subl %%ecx,%0\n"
22255 " addl %0,%%eax\n"
22256 "1:\n"
22257 + __COPYUSER_RESTORE_ES
22258 ".section .fixup,\"ax\"\n"
22259 "2: xorl %%eax,%%eax\n"
22260 " jmp 1b\n"
22261 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22262
22263 #ifdef CONFIG_X86_INTEL_USERCOPY
22264 static unsigned long
22265 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22266 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22267 {
22268 int d0, d1;
22269 __asm__ __volatile__(
22270 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22271 " .align 2,0x90\n"
22272 "3: movl 0(%4), %%eax\n"
22273 "4: movl 4(%4), %%edx\n"
22274 - "5: movl %%eax, 0(%3)\n"
22275 - "6: movl %%edx, 4(%3)\n"
22276 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22277 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22278 "7: movl 8(%4), %%eax\n"
22279 "8: movl 12(%4),%%edx\n"
22280 - "9: movl %%eax, 8(%3)\n"
22281 - "10: movl %%edx, 12(%3)\n"
22282 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22283 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22284 "11: movl 16(%4), %%eax\n"
22285 "12: movl 20(%4), %%edx\n"
22286 - "13: movl %%eax, 16(%3)\n"
22287 - "14: movl %%edx, 20(%3)\n"
22288 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22289 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22290 "15: movl 24(%4), %%eax\n"
22291 "16: movl 28(%4), %%edx\n"
22292 - "17: movl %%eax, 24(%3)\n"
22293 - "18: movl %%edx, 28(%3)\n"
22294 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22295 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22296 "19: movl 32(%4), %%eax\n"
22297 "20: movl 36(%4), %%edx\n"
22298 - "21: movl %%eax, 32(%3)\n"
22299 - "22: movl %%edx, 36(%3)\n"
22300 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22301 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22302 "23: movl 40(%4), %%eax\n"
22303 "24: movl 44(%4), %%edx\n"
22304 - "25: movl %%eax, 40(%3)\n"
22305 - "26: movl %%edx, 44(%3)\n"
22306 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22307 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22308 "27: movl 48(%4), %%eax\n"
22309 "28: movl 52(%4), %%edx\n"
22310 - "29: movl %%eax, 48(%3)\n"
22311 - "30: movl %%edx, 52(%3)\n"
22312 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22313 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22314 "31: movl 56(%4), %%eax\n"
22315 "32: movl 60(%4), %%edx\n"
22316 - "33: movl %%eax, 56(%3)\n"
22317 - "34: movl %%edx, 60(%3)\n"
22318 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22319 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22320 " addl $-64, %0\n"
22321 " addl $64, %4\n"
22322 " addl $64, %3\n"
22323 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22324 " shrl $2, %0\n"
22325 " andl $3, %%eax\n"
22326 " cld\n"
22327 + __COPYUSER_SET_ES
22328 "99: rep; movsl\n"
22329 "36: movl %%eax, %0\n"
22330 "37: rep; movsb\n"
22331 "100:\n"
22332 + __COPYUSER_RESTORE_ES
22333 + ".section .fixup,\"ax\"\n"
22334 + "101: lea 0(%%eax,%0,4),%0\n"
22335 + " jmp 100b\n"
22336 + ".previous\n"
22337 + ".section __ex_table,\"a\"\n"
22338 + " .align 4\n"
22339 + " .long 1b,100b\n"
22340 + " .long 2b,100b\n"
22341 + " .long 3b,100b\n"
22342 + " .long 4b,100b\n"
22343 + " .long 5b,100b\n"
22344 + " .long 6b,100b\n"
22345 + " .long 7b,100b\n"
22346 + " .long 8b,100b\n"
22347 + " .long 9b,100b\n"
22348 + " .long 10b,100b\n"
22349 + " .long 11b,100b\n"
22350 + " .long 12b,100b\n"
22351 + " .long 13b,100b\n"
22352 + " .long 14b,100b\n"
22353 + " .long 15b,100b\n"
22354 + " .long 16b,100b\n"
22355 + " .long 17b,100b\n"
22356 + " .long 18b,100b\n"
22357 + " .long 19b,100b\n"
22358 + " .long 20b,100b\n"
22359 + " .long 21b,100b\n"
22360 + " .long 22b,100b\n"
22361 + " .long 23b,100b\n"
22362 + " .long 24b,100b\n"
22363 + " .long 25b,100b\n"
22364 + " .long 26b,100b\n"
22365 + " .long 27b,100b\n"
22366 + " .long 28b,100b\n"
22367 + " .long 29b,100b\n"
22368 + " .long 30b,100b\n"
22369 + " .long 31b,100b\n"
22370 + " .long 32b,100b\n"
22371 + " .long 33b,100b\n"
22372 + " .long 34b,100b\n"
22373 + " .long 35b,100b\n"
22374 + " .long 36b,100b\n"
22375 + " .long 37b,100b\n"
22376 + " .long 99b,101b\n"
22377 + ".previous"
22378 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
22379 + : "1"(to), "2"(from), "0"(size)
22380 + : "eax", "edx", "memory");
22381 + return size;
22382 +}
22383 +
22384 +static unsigned long
22385 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22386 +{
22387 + int d0, d1;
22388 + __asm__ __volatile__(
22389 + " .align 2,0x90\n"
22390 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22391 + " cmpl $67, %0\n"
22392 + " jbe 3f\n"
22393 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22394 + " .align 2,0x90\n"
22395 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22396 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22397 + "5: movl %%eax, 0(%3)\n"
22398 + "6: movl %%edx, 4(%3)\n"
22399 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22400 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22401 + "9: movl %%eax, 8(%3)\n"
22402 + "10: movl %%edx, 12(%3)\n"
22403 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22404 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22405 + "13: movl %%eax, 16(%3)\n"
22406 + "14: movl %%edx, 20(%3)\n"
22407 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22408 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22409 + "17: movl %%eax, 24(%3)\n"
22410 + "18: movl %%edx, 28(%3)\n"
22411 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22412 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22413 + "21: movl %%eax, 32(%3)\n"
22414 + "22: movl %%edx, 36(%3)\n"
22415 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22416 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22417 + "25: movl %%eax, 40(%3)\n"
22418 + "26: movl %%edx, 44(%3)\n"
22419 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22420 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22421 + "29: movl %%eax, 48(%3)\n"
22422 + "30: movl %%edx, 52(%3)\n"
22423 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22424 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22425 + "33: movl %%eax, 56(%3)\n"
22426 + "34: movl %%edx, 60(%3)\n"
22427 + " addl $-64, %0\n"
22428 + " addl $64, %4\n"
22429 + " addl $64, %3\n"
22430 + " cmpl $63, %0\n"
22431 + " ja 1b\n"
22432 + "35: movl %0, %%eax\n"
22433 + " shrl $2, %0\n"
22434 + " andl $3, %%eax\n"
22435 + " cld\n"
22436 + "99: rep; "__copyuser_seg" movsl\n"
22437 + "36: movl %%eax, %0\n"
22438 + "37: rep; "__copyuser_seg" movsb\n"
22439 + "100:\n"
22440 ".section .fixup,\"ax\"\n"
22441 "101: lea 0(%%eax,%0,4),%0\n"
22442 " jmp 100b\n"
22443 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22444 int d0, d1;
22445 __asm__ __volatile__(
22446 " .align 2,0x90\n"
22447 - "0: movl 32(%4), %%eax\n"
22448 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22449 " cmpl $67, %0\n"
22450 " jbe 2f\n"
22451 - "1: movl 64(%4), %%eax\n"
22452 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22453 " .align 2,0x90\n"
22454 - "2: movl 0(%4), %%eax\n"
22455 - "21: movl 4(%4), %%edx\n"
22456 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22457 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22458 " movl %%eax, 0(%3)\n"
22459 " movl %%edx, 4(%3)\n"
22460 - "3: movl 8(%4), %%eax\n"
22461 - "31: movl 12(%4),%%edx\n"
22462 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22463 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22464 " movl %%eax, 8(%3)\n"
22465 " movl %%edx, 12(%3)\n"
22466 - "4: movl 16(%4), %%eax\n"
22467 - "41: movl 20(%4), %%edx\n"
22468 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22469 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22470 " movl %%eax, 16(%3)\n"
22471 " movl %%edx, 20(%3)\n"
22472 - "10: movl 24(%4), %%eax\n"
22473 - "51: movl 28(%4), %%edx\n"
22474 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22475 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22476 " movl %%eax, 24(%3)\n"
22477 " movl %%edx, 28(%3)\n"
22478 - "11: movl 32(%4), %%eax\n"
22479 - "61: movl 36(%4), %%edx\n"
22480 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22481 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22482 " movl %%eax, 32(%3)\n"
22483 " movl %%edx, 36(%3)\n"
22484 - "12: movl 40(%4), %%eax\n"
22485 - "71: movl 44(%4), %%edx\n"
22486 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22487 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22488 " movl %%eax, 40(%3)\n"
22489 " movl %%edx, 44(%3)\n"
22490 - "13: movl 48(%4), %%eax\n"
22491 - "81: movl 52(%4), %%edx\n"
22492 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22493 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22494 " movl %%eax, 48(%3)\n"
22495 " movl %%edx, 52(%3)\n"
22496 - "14: movl 56(%4), %%eax\n"
22497 - "91: movl 60(%4), %%edx\n"
22498 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22499 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22500 " movl %%eax, 56(%3)\n"
22501 " movl %%edx, 60(%3)\n"
22502 " addl $-64, %0\n"
22503 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22504 " shrl $2, %0\n"
22505 " andl $3, %%eax\n"
22506 " cld\n"
22507 - "6: rep; movsl\n"
22508 + "6: rep; "__copyuser_seg" movsl\n"
22509 " movl %%eax,%0\n"
22510 - "7: rep; movsb\n"
22511 + "7: rep; "__copyuser_seg" movsb\n"
22512 "8:\n"
22513 ".section .fixup,\"ax\"\n"
22514 "9: lea 0(%%eax,%0,4),%0\n"
22515 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22516
22517 __asm__ __volatile__(
22518 " .align 2,0x90\n"
22519 - "0: movl 32(%4), %%eax\n"
22520 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22521 " cmpl $67, %0\n"
22522 " jbe 2f\n"
22523 - "1: movl 64(%4), %%eax\n"
22524 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22525 " .align 2,0x90\n"
22526 - "2: movl 0(%4), %%eax\n"
22527 - "21: movl 4(%4), %%edx\n"
22528 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22529 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22530 " movnti %%eax, 0(%3)\n"
22531 " movnti %%edx, 4(%3)\n"
22532 - "3: movl 8(%4), %%eax\n"
22533 - "31: movl 12(%4),%%edx\n"
22534 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22535 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22536 " movnti %%eax, 8(%3)\n"
22537 " movnti %%edx, 12(%3)\n"
22538 - "4: movl 16(%4), %%eax\n"
22539 - "41: movl 20(%4), %%edx\n"
22540 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22541 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22542 " movnti %%eax, 16(%3)\n"
22543 " movnti %%edx, 20(%3)\n"
22544 - "10: movl 24(%4), %%eax\n"
22545 - "51: movl 28(%4), %%edx\n"
22546 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22547 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22548 " movnti %%eax, 24(%3)\n"
22549 " movnti %%edx, 28(%3)\n"
22550 - "11: movl 32(%4), %%eax\n"
22551 - "61: movl 36(%4), %%edx\n"
22552 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22553 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22554 " movnti %%eax, 32(%3)\n"
22555 " movnti %%edx, 36(%3)\n"
22556 - "12: movl 40(%4), %%eax\n"
22557 - "71: movl 44(%4), %%edx\n"
22558 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22559 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22560 " movnti %%eax, 40(%3)\n"
22561 " movnti %%edx, 44(%3)\n"
22562 - "13: movl 48(%4), %%eax\n"
22563 - "81: movl 52(%4), %%edx\n"
22564 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22565 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22566 " movnti %%eax, 48(%3)\n"
22567 " movnti %%edx, 52(%3)\n"
22568 - "14: movl 56(%4), %%eax\n"
22569 - "91: movl 60(%4), %%edx\n"
22570 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22571 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22572 " movnti %%eax, 56(%3)\n"
22573 " movnti %%edx, 60(%3)\n"
22574 " addl $-64, %0\n"
22575 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22576 " shrl $2, %0\n"
22577 " andl $3, %%eax\n"
22578 " cld\n"
22579 - "6: rep; movsl\n"
22580 + "6: rep; "__copyuser_seg" movsl\n"
22581 " movl %%eax,%0\n"
22582 - "7: rep; movsb\n"
22583 + "7: rep; "__copyuser_seg" movsb\n"
22584 "8:\n"
22585 ".section .fixup,\"ax\"\n"
22586 "9: lea 0(%%eax,%0,4),%0\n"
22587 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
22588
22589 __asm__ __volatile__(
22590 " .align 2,0x90\n"
22591 - "0: movl 32(%4), %%eax\n"
22592 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22593 " cmpl $67, %0\n"
22594 " jbe 2f\n"
22595 - "1: movl 64(%4), %%eax\n"
22596 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22597 " .align 2,0x90\n"
22598 - "2: movl 0(%4), %%eax\n"
22599 - "21: movl 4(%4), %%edx\n"
22600 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22601 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22602 " movnti %%eax, 0(%3)\n"
22603 " movnti %%edx, 4(%3)\n"
22604 - "3: movl 8(%4), %%eax\n"
22605 - "31: movl 12(%4),%%edx\n"
22606 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22607 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22608 " movnti %%eax, 8(%3)\n"
22609 " movnti %%edx, 12(%3)\n"
22610 - "4: movl 16(%4), %%eax\n"
22611 - "41: movl 20(%4), %%edx\n"
22612 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22613 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22614 " movnti %%eax, 16(%3)\n"
22615 " movnti %%edx, 20(%3)\n"
22616 - "10: movl 24(%4), %%eax\n"
22617 - "51: movl 28(%4), %%edx\n"
22618 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22619 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22620 " movnti %%eax, 24(%3)\n"
22621 " movnti %%edx, 28(%3)\n"
22622 - "11: movl 32(%4), %%eax\n"
22623 - "61: movl 36(%4), %%edx\n"
22624 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22625 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22626 " movnti %%eax, 32(%3)\n"
22627 " movnti %%edx, 36(%3)\n"
22628 - "12: movl 40(%4), %%eax\n"
22629 - "71: movl 44(%4), %%edx\n"
22630 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22631 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22632 " movnti %%eax, 40(%3)\n"
22633 " movnti %%edx, 44(%3)\n"
22634 - "13: movl 48(%4), %%eax\n"
22635 - "81: movl 52(%4), %%edx\n"
22636 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22637 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22638 " movnti %%eax, 48(%3)\n"
22639 " movnti %%edx, 52(%3)\n"
22640 - "14: movl 56(%4), %%eax\n"
22641 - "91: movl 60(%4), %%edx\n"
22642 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22643 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22644 " movnti %%eax, 56(%3)\n"
22645 " movnti %%edx, 60(%3)\n"
22646 " addl $-64, %0\n"
22647 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
22648 " shrl $2, %0\n"
22649 " andl $3, %%eax\n"
22650 " cld\n"
22651 - "6: rep; movsl\n"
22652 + "6: rep; "__copyuser_seg" movsl\n"
22653 " movl %%eax,%0\n"
22654 - "7: rep; movsb\n"
22655 + "7: rep; "__copyuser_seg" movsb\n"
22656 "8:\n"
22657 ".section .fixup,\"ax\"\n"
22658 "9: lea 0(%%eax,%0,4),%0\n"
22659 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
22660 */
22661 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
22662 unsigned long size);
22663 -unsigned long __copy_user_intel(void __user *to, const void *from,
22664 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
22665 + unsigned long size);
22666 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
22667 unsigned long size);
22668 unsigned long __copy_user_zeroing_intel_nocache(void *to,
22669 const void __user *from, unsigned long size);
22670 #endif /* CONFIG_X86_INTEL_USERCOPY */
22671
22672 /* Generic arbitrary sized copy. */
22673 -#define __copy_user(to, from, size) \
22674 +#define __copy_user(to, from, size, prefix, set, restore) \
22675 do { \
22676 int __d0, __d1, __d2; \
22677 __asm__ __volatile__( \
22678 + set \
22679 " cmp $7,%0\n" \
22680 " jbe 1f\n" \
22681 " movl %1,%0\n" \
22682 " negl %0\n" \
22683 " andl $7,%0\n" \
22684 " subl %0,%3\n" \
22685 - "4: rep; movsb\n" \
22686 + "4: rep; "prefix"movsb\n" \
22687 " movl %3,%0\n" \
22688 " shrl $2,%0\n" \
22689 " andl $3,%3\n" \
22690 " .align 2,0x90\n" \
22691 - "0: rep; movsl\n" \
22692 + "0: rep; "prefix"movsl\n" \
22693 " movl %3,%0\n" \
22694 - "1: rep; movsb\n" \
22695 + "1: rep; "prefix"movsb\n" \
22696 "2:\n" \
22697 + restore \
22698 ".section .fixup,\"ax\"\n" \
22699 "5: addl %3,%0\n" \
22700 " jmp 2b\n" \
22701 @@ -682,14 +799,14 @@ do { \
22702 " negl %0\n" \
22703 " andl $7,%0\n" \
22704 " subl %0,%3\n" \
22705 - "4: rep; movsb\n" \
22706 + "4: rep; "__copyuser_seg"movsb\n" \
22707 " movl %3,%0\n" \
22708 " shrl $2,%0\n" \
22709 " andl $3,%3\n" \
22710 " .align 2,0x90\n" \
22711 - "0: rep; movsl\n" \
22712 + "0: rep; "__copyuser_seg"movsl\n" \
22713 " movl %3,%0\n" \
22714 - "1: rep; movsb\n" \
22715 + "1: rep; "__copyuser_seg"movsb\n" \
22716 "2:\n" \
22717 ".section .fixup,\"ax\"\n" \
22718 "5: addl %3,%0\n" \
22719 @@ -775,9 +892,9 @@ survive:
22720 }
22721 #endif
22722 if (movsl_is_ok(to, from, n))
22723 - __copy_user(to, from, n);
22724 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
22725 else
22726 - n = __copy_user_intel(to, from, n);
22727 + n = __generic_copy_to_user_intel(to, from, n);
22728 return n;
22729 }
22730 EXPORT_SYMBOL(__copy_to_user_ll);
22731 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
22732 unsigned long n)
22733 {
22734 if (movsl_is_ok(to, from, n))
22735 - __copy_user(to, from, n);
22736 + __copy_user(to, from, n, __copyuser_seg, "", "");
22737 else
22738 - n = __copy_user_intel((void __user *)to,
22739 - (const void *)from, n);
22740 + n = __generic_copy_from_user_intel(to, from, n);
22741 return n;
22742 }
22743 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
22744 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
22745 if (n > 64 && cpu_has_xmm2)
22746 n = __copy_user_intel_nocache(to, from, n);
22747 else
22748 - __copy_user(to, from, n);
22749 + __copy_user(to, from, n, __copyuser_seg, "", "");
22750 #else
22751 - __copy_user(to, from, n);
22752 + __copy_user(to, from, n, __copyuser_seg, "", "");
22753 #endif
22754 return n;
22755 }
22756 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
22757
22758 -/**
22759 - * copy_to_user: - Copy a block of data into user space.
22760 - * @to: Destination address, in user space.
22761 - * @from: Source address, in kernel space.
22762 - * @n: Number of bytes to copy.
22763 - *
22764 - * Context: User context only. This function may sleep.
22765 - *
22766 - * Copy data from kernel space to user space.
22767 - *
22768 - * Returns number of bytes that could not be copied.
22769 - * On success, this will be zero.
22770 - */
22771 -unsigned long
22772 -copy_to_user(void __user *to, const void *from, unsigned long n)
22773 -{
22774 - if (access_ok(VERIFY_WRITE, to, n))
22775 - n = __copy_to_user(to, from, n);
22776 - return n;
22777 -}
22778 -EXPORT_SYMBOL(copy_to_user);
22779 -
22780 -/**
22781 - * copy_from_user: - Copy a block of data from user space.
22782 - * @to: Destination address, in kernel space.
22783 - * @from: Source address, in user space.
22784 - * @n: Number of bytes to copy.
22785 - *
22786 - * Context: User context only. This function may sleep.
22787 - *
22788 - * Copy data from user space to kernel space.
22789 - *
22790 - * Returns number of bytes that could not be copied.
22791 - * On success, this will be zero.
22792 - *
22793 - * If some data could not be copied, this function will pad the copied
22794 - * data to the requested size using zero bytes.
22795 - */
22796 -unsigned long
22797 -_copy_from_user(void *to, const void __user *from, unsigned long n)
22798 -{
22799 - if (access_ok(VERIFY_READ, from, n))
22800 - n = __copy_from_user(to, from, n);
22801 - else
22802 - memset(to, 0, n);
22803 - return n;
22804 -}
22805 -EXPORT_SYMBOL(_copy_from_user);
22806 -
22807 void copy_from_user_overflow(void)
22808 {
22809 WARN(1, "Buffer overflow detected!\n");
22810 }
22811 EXPORT_SYMBOL(copy_from_user_overflow);
22812 +
22813 +void copy_to_user_overflow(void)
22814 +{
22815 + WARN(1, "Buffer overflow detected!\n");
22816 +}
22817 +EXPORT_SYMBOL(copy_to_user_overflow);
22818 +
22819 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22820 +void __set_fs(mm_segment_t x)
22821 +{
22822 + switch (x.seg) {
22823 + case 0:
22824 + loadsegment(gs, 0);
22825 + break;
22826 + case TASK_SIZE_MAX:
22827 + loadsegment(gs, __USER_DS);
22828 + break;
22829 + case -1UL:
22830 + loadsegment(gs, __KERNEL_DS);
22831 + break;
22832 + default:
22833 + BUG();
22834 + }
22835 + return;
22836 +}
22837 +EXPORT_SYMBOL(__set_fs);
22838 +
22839 +void set_fs(mm_segment_t x)
22840 +{
22841 + current_thread_info()->addr_limit = x;
22842 + __set_fs(x);
22843 +}
22844 +EXPORT_SYMBOL(set_fs);
22845 +#endif
22846 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
22847 index b7c2849..8633ad8 100644
22848 --- a/arch/x86/lib/usercopy_64.c
22849 +++ b/arch/x86/lib/usercopy_64.c
22850 @@ -42,6 +42,12 @@ long
22851 __strncpy_from_user(char *dst, const char __user *src, long count)
22852 {
22853 long res;
22854 +
22855 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22856 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22857 + src += PAX_USER_SHADOW_BASE;
22858 +#endif
22859 +
22860 __do_strncpy_from_user(dst, src, count, res);
22861 return res;
22862 }
22863 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
22864 {
22865 long __d0;
22866 might_fault();
22867 +
22868 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22869 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
22870 + addr += PAX_USER_SHADOW_BASE;
22871 +#endif
22872 +
22873 /* no memory constraint because it doesn't change any memory gcc knows
22874 about */
22875 asm volatile(
22876 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
22877 }
22878 EXPORT_SYMBOL(strlen_user);
22879
22880 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
22881 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
22882 {
22883 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22884 - return copy_user_generic((__force void *)to, (__force void *)from, len);
22885 - }
22886 - return len;
22887 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
22888 +
22889 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22890 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
22891 + to += PAX_USER_SHADOW_BASE;
22892 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
22893 + from += PAX_USER_SHADOW_BASE;
22894 +#endif
22895 +
22896 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
22897 + }
22898 + return len;
22899 }
22900 EXPORT_SYMBOL(copy_in_user);
22901
22902 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
22903 * it is not necessary to optimize tail handling.
22904 */
22905 unsigned long
22906 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
22907 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
22908 {
22909 char c;
22910 unsigned zero_len;
22911 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
22912 index d0474ad..36e9257 100644
22913 --- a/arch/x86/mm/extable.c
22914 +++ b/arch/x86/mm/extable.c
22915 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
22916 const struct exception_table_entry *fixup;
22917
22918 #ifdef CONFIG_PNPBIOS
22919 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
22920 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
22921 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
22922 extern u32 pnp_bios_is_utter_crap;
22923 pnp_bios_is_utter_crap = 1;
22924 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
22925 index 5db0490..2ddce45 100644
22926 --- a/arch/x86/mm/fault.c
22927 +++ b/arch/x86/mm/fault.c
22928 @@ -13,11 +13,18 @@
22929 #include <linux/perf_event.h> /* perf_sw_event */
22930 #include <linux/hugetlb.h> /* hstate_index_to_shift */
22931 #include <linux/prefetch.h> /* prefetchw */
22932 +#include <linux/unistd.h>
22933 +#include <linux/compiler.h>
22934
22935 #include <asm/traps.h> /* dotraplinkage, ... */
22936 #include <asm/pgalloc.h> /* pgd_*(), ... */
22937 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
22938 #include <asm/fixmap.h> /* VSYSCALL_START */
22939 +#include <asm/tlbflush.h>
22940 +
22941 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22942 +#include <asm/stacktrace.h>
22943 +#endif
22944
22945 /*
22946 * Page fault error code bits:
22947 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
22948 int ret = 0;
22949
22950 /* kprobe_running() needs smp_processor_id() */
22951 - if (kprobes_built_in() && !user_mode_vm(regs)) {
22952 + if (kprobes_built_in() && !user_mode(regs)) {
22953 preempt_disable();
22954 if (kprobe_running() && kprobe_fault_handler(regs, 14))
22955 ret = 1;
22956 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
22957 return !instr_lo || (instr_lo>>1) == 1;
22958 case 0x00:
22959 /* Prefetch instruction is 0x0F0D or 0x0F18 */
22960 - if (probe_kernel_address(instr, opcode))
22961 + if (user_mode(regs)) {
22962 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22963 + return 0;
22964 + } else if (probe_kernel_address(instr, opcode))
22965 return 0;
22966
22967 *prefetch = (instr_lo == 0xF) &&
22968 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
22969 while (instr < max_instr) {
22970 unsigned char opcode;
22971
22972 - if (probe_kernel_address(instr, opcode))
22973 + if (user_mode(regs)) {
22974 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
22975 + break;
22976 + } else if (probe_kernel_address(instr, opcode))
22977 break;
22978
22979 instr++;
22980 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
22981 force_sig_info(si_signo, &info, tsk);
22982 }
22983
22984 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22985 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
22986 +#endif
22987 +
22988 +#ifdef CONFIG_PAX_EMUTRAMP
22989 +static int pax_handle_fetch_fault(struct pt_regs *regs);
22990 +#endif
22991 +
22992 +#ifdef CONFIG_PAX_PAGEEXEC
22993 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
22994 +{
22995 + pgd_t *pgd;
22996 + pud_t *pud;
22997 + pmd_t *pmd;
22998 +
22999 + pgd = pgd_offset(mm, address);
23000 + if (!pgd_present(*pgd))
23001 + return NULL;
23002 + pud = pud_offset(pgd, address);
23003 + if (!pud_present(*pud))
23004 + return NULL;
23005 + pmd = pmd_offset(pud, address);
23006 + if (!pmd_present(*pmd))
23007 + return NULL;
23008 + return pmd;
23009 +}
23010 +#endif
23011 +
23012 DEFINE_SPINLOCK(pgd_lock);
23013 LIST_HEAD(pgd_list);
23014
23015 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23016 for (address = VMALLOC_START & PMD_MASK;
23017 address >= TASK_SIZE && address < FIXADDR_TOP;
23018 address += PMD_SIZE) {
23019 +
23020 +#ifdef CONFIG_PAX_PER_CPU_PGD
23021 + unsigned long cpu;
23022 +#else
23023 struct page *page;
23024 +#endif
23025
23026 spin_lock(&pgd_lock);
23027 +
23028 +#ifdef CONFIG_PAX_PER_CPU_PGD
23029 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23030 + pgd_t *pgd = get_cpu_pgd(cpu);
23031 + pmd_t *ret;
23032 +#else
23033 list_for_each_entry(page, &pgd_list, lru) {
23034 + pgd_t *pgd = page_address(page);
23035 spinlock_t *pgt_lock;
23036 pmd_t *ret;
23037
23038 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23039 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23040
23041 spin_lock(pgt_lock);
23042 - ret = vmalloc_sync_one(page_address(page), address);
23043 +#endif
23044 +
23045 + ret = vmalloc_sync_one(pgd, address);
23046 +
23047 +#ifndef CONFIG_PAX_PER_CPU_PGD
23048 spin_unlock(pgt_lock);
23049 +#endif
23050
23051 if (!ret)
23052 break;
23053 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23054 * an interrupt in the middle of a task switch..
23055 */
23056 pgd_paddr = read_cr3();
23057 +
23058 +#ifdef CONFIG_PAX_PER_CPU_PGD
23059 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23060 +#endif
23061 +
23062 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23063 if (!pmd_k)
23064 return -1;
23065 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23066 * happen within a race in page table update. In the later
23067 * case just flush:
23068 */
23069 +
23070 +#ifdef CONFIG_PAX_PER_CPU_PGD
23071 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23072 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23073 +#else
23074 pgd = pgd_offset(current->active_mm, address);
23075 +#endif
23076 +
23077 pgd_ref = pgd_offset_k(address);
23078 if (pgd_none(*pgd_ref))
23079 return -1;
23080 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23081 static int is_errata100(struct pt_regs *regs, unsigned long address)
23082 {
23083 #ifdef CONFIG_X86_64
23084 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23085 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23086 return 1;
23087 #endif
23088 return 0;
23089 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23090 }
23091
23092 static const char nx_warning[] = KERN_CRIT
23093 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23094 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23095
23096 static void
23097 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23098 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23099 if (!oops_may_print())
23100 return;
23101
23102 - if (error_code & PF_INSTR) {
23103 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23104 unsigned int level;
23105
23106 pte_t *pte = lookup_address(address, &level);
23107
23108 if (pte && pte_present(*pte) && !pte_exec(*pte))
23109 - printk(nx_warning, current_uid());
23110 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23111 }
23112
23113 +#ifdef CONFIG_PAX_KERNEXEC
23114 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23115 + if (current->signal->curr_ip)
23116 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23117 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23118 + else
23119 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23120 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23121 + }
23122 +#endif
23123 +
23124 printk(KERN_ALERT "BUG: unable to handle kernel ");
23125 if (address < PAGE_SIZE)
23126 printk(KERN_CONT "NULL pointer dereference");
23127 @@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23128 }
23129 #endif
23130
23131 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23132 + if (pax_is_fetch_fault(regs, error_code, address)) {
23133 +
23134 +#ifdef CONFIG_PAX_EMUTRAMP
23135 + switch (pax_handle_fetch_fault(regs)) {
23136 + case 2:
23137 + return;
23138 + }
23139 +#endif
23140 +
23141 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23142 + do_group_exit(SIGKILL);
23143 + }
23144 +#endif
23145 +
23146 if (unlikely(show_unhandled_signals))
23147 show_signal_msg(regs, error_code, address, tsk);
23148
23149 @@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23150 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23151 printk(KERN_ERR
23152 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23153 - tsk->comm, tsk->pid, address);
23154 + tsk->comm, task_pid_nr(tsk), address);
23155 code = BUS_MCEERR_AR;
23156 }
23157 #endif
23158 @@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23159 return 1;
23160 }
23161
23162 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23163 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23164 +{
23165 + pte_t *pte;
23166 + pmd_t *pmd;
23167 + spinlock_t *ptl;
23168 + unsigned char pte_mask;
23169 +
23170 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23171 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23172 + return 0;
23173 +
23174 + /* PaX: it's our fault, let's handle it if we can */
23175 +
23176 + /* PaX: take a look at read faults before acquiring any locks */
23177 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23178 + /* instruction fetch attempt from a protected page in user mode */
23179 + up_read(&mm->mmap_sem);
23180 +
23181 +#ifdef CONFIG_PAX_EMUTRAMP
23182 + switch (pax_handle_fetch_fault(regs)) {
23183 + case 2:
23184 + return 1;
23185 + }
23186 +#endif
23187 +
23188 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23189 + do_group_exit(SIGKILL);
23190 + }
23191 +
23192 + pmd = pax_get_pmd(mm, address);
23193 + if (unlikely(!pmd))
23194 + return 0;
23195 +
23196 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23197 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23198 + pte_unmap_unlock(pte, ptl);
23199 + return 0;
23200 + }
23201 +
23202 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23203 + /* write attempt to a protected page in user mode */
23204 + pte_unmap_unlock(pte, ptl);
23205 + return 0;
23206 + }
23207 +
23208 +#ifdef CONFIG_SMP
23209 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23210 +#else
23211 + if (likely(address > get_limit(regs->cs)))
23212 +#endif
23213 + {
23214 + set_pte(pte, pte_mkread(*pte));
23215 + __flush_tlb_one(address);
23216 + pte_unmap_unlock(pte, ptl);
23217 + up_read(&mm->mmap_sem);
23218 + return 1;
23219 + }
23220 +
23221 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23222 +
23223 + /*
23224 + * PaX: fill DTLB with user rights and retry
23225 + */
23226 + __asm__ __volatile__ (
23227 + "orb %2,(%1)\n"
23228 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23229 +/*
23230 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23231 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23232 + * page fault when examined during a TLB load attempt. this is true not only
23233 + * for PTEs holding a non-present entry but also present entries that will
23234 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23235 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23236 + * for our target pages since their PTEs are simply not in the TLBs at all.
23237 +
23238 + * the best thing in omitting it is that we gain around 15-20% speed in the
23239 + * fast path of the page fault handler and can get rid of tracing since we
23240 + * can no longer flush unintended entries.
23241 + */
23242 + "invlpg (%0)\n"
23243 +#endif
23244 + __copyuser_seg"testb $0,(%0)\n"
23245 + "xorb %3,(%1)\n"
23246 + :
23247 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23248 + : "memory", "cc");
23249 + pte_unmap_unlock(pte, ptl);
23250 + up_read(&mm->mmap_sem);
23251 + return 1;
23252 +}
23253 +#endif
23254 +
23255 /*
23256 * Handle a spurious fault caused by a stale TLB entry.
23257 *
23258 @@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
23259 static inline int
23260 access_error(unsigned long error_code, struct vm_area_struct *vma)
23261 {
23262 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23263 + return 1;
23264 +
23265 if (error_code & PF_WRITE) {
23266 /* write, present and write, not present: */
23267 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23268 @@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23269 {
23270 struct vm_area_struct *vma;
23271 struct task_struct *tsk;
23272 - unsigned long address;
23273 struct mm_struct *mm;
23274 int fault;
23275 int write = error_code & PF_WRITE;
23276 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23277 (write ? FAULT_FLAG_WRITE : 0);
23278
23279 - tsk = current;
23280 - mm = tsk->mm;
23281 -
23282 /* Get the faulting address: */
23283 - address = read_cr2();
23284 + unsigned long address = read_cr2();
23285 +
23286 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23287 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23288 + if (!search_exception_tables(regs->ip)) {
23289 + bad_area_nosemaphore(regs, error_code, address);
23290 + return;
23291 + }
23292 + if (address < PAX_USER_SHADOW_BASE) {
23293 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23294 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23295 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23296 + } else
23297 + address -= PAX_USER_SHADOW_BASE;
23298 + }
23299 +#endif
23300 +
23301 + tsk = current;
23302 + mm = tsk->mm;
23303
23304 /*
23305 * Detect and handle instructions that would cause a page fault for
23306 @@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23307 * User-mode registers count as a user access even for any
23308 * potential system fault or CPU buglet:
23309 */
23310 - if (user_mode_vm(regs)) {
23311 + if (user_mode(regs)) {
23312 local_irq_enable();
23313 error_code |= PF_USER;
23314 } else {
23315 @@ -1122,6 +1328,11 @@ retry:
23316 might_sleep();
23317 }
23318
23319 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23320 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23321 + return;
23322 +#endif
23323 +
23324 vma = find_vma(mm, address);
23325 if (unlikely(!vma)) {
23326 bad_area(regs, error_code, address);
23327 @@ -1133,18 +1344,24 @@ retry:
23328 bad_area(regs, error_code, address);
23329 return;
23330 }
23331 - if (error_code & PF_USER) {
23332 - /*
23333 - * Accessing the stack below %sp is always a bug.
23334 - * The large cushion allows instructions like enter
23335 - * and pusha to work. ("enter $65535, $31" pushes
23336 - * 32 pointers and then decrements %sp by 65535.)
23337 - */
23338 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23339 - bad_area(regs, error_code, address);
23340 - return;
23341 - }
23342 + /*
23343 + * Accessing the stack below %sp is always a bug.
23344 + * The large cushion allows instructions like enter
23345 + * and pusha to work. ("enter $65535, $31" pushes
23346 + * 32 pointers and then decrements %sp by 65535.)
23347 + */
23348 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23349 + bad_area(regs, error_code, address);
23350 + return;
23351 }
23352 +
23353 +#ifdef CONFIG_PAX_SEGMEXEC
23354 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23355 + bad_area(regs, error_code, address);
23356 + return;
23357 + }
23358 +#endif
23359 +
23360 if (unlikely(expand_stack(vma, address))) {
23361 bad_area(regs, error_code, address);
23362 return;
23363 @@ -1199,3 +1416,292 @@ good_area:
23364
23365 up_read(&mm->mmap_sem);
23366 }
23367 +
23368 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23369 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23370 +{
23371 + struct mm_struct *mm = current->mm;
23372 + unsigned long ip = regs->ip;
23373 +
23374 + if (v8086_mode(regs))
23375 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23376 +
23377 +#ifdef CONFIG_PAX_PAGEEXEC
23378 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23379 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23380 + return true;
23381 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23382 + return true;
23383 + return false;
23384 + }
23385 +#endif
23386 +
23387 +#ifdef CONFIG_PAX_SEGMEXEC
23388 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23389 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23390 + return true;
23391 + return false;
23392 + }
23393 +#endif
23394 +
23395 + return false;
23396 +}
23397 +#endif
23398 +
23399 +#ifdef CONFIG_PAX_EMUTRAMP
23400 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23401 +{
23402 + int err;
23403 +
23404 + do { /* PaX: libffi trampoline emulation */
23405 + unsigned char mov, jmp;
23406 + unsigned int addr1, addr2;
23407 +
23408 +#ifdef CONFIG_X86_64
23409 + if ((regs->ip + 9) >> 32)
23410 + break;
23411 +#endif
23412 +
23413 + err = get_user(mov, (unsigned char __user *)regs->ip);
23414 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23415 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23416 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23417 +
23418 + if (err)
23419 + break;
23420 +
23421 + if (mov == 0xB8 && jmp == 0xE9) {
23422 + regs->ax = addr1;
23423 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23424 + return 2;
23425 + }
23426 + } while (0);
23427 +
23428 + do { /* PaX: gcc trampoline emulation #1 */
23429 + unsigned char mov1, mov2;
23430 + unsigned short jmp;
23431 + unsigned int addr1, addr2;
23432 +
23433 +#ifdef CONFIG_X86_64
23434 + if ((regs->ip + 11) >> 32)
23435 + break;
23436 +#endif
23437 +
23438 + err = get_user(mov1, (unsigned char __user *)regs->ip);
23439 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23440 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
23441 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23442 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
23443 +
23444 + if (err)
23445 + break;
23446 +
23447 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
23448 + regs->cx = addr1;
23449 + regs->ax = addr2;
23450 + regs->ip = addr2;
23451 + return 2;
23452 + }
23453 + } while (0);
23454 +
23455 + do { /* PaX: gcc trampoline emulation #2 */
23456 + unsigned char mov, jmp;
23457 + unsigned int addr1, addr2;
23458 +
23459 +#ifdef CONFIG_X86_64
23460 + if ((regs->ip + 9) >> 32)
23461 + break;
23462 +#endif
23463 +
23464 + err = get_user(mov, (unsigned char __user *)regs->ip);
23465 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23466 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23467 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23468 +
23469 + if (err)
23470 + break;
23471 +
23472 + if (mov == 0xB9 && jmp == 0xE9) {
23473 + regs->cx = addr1;
23474 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23475 + return 2;
23476 + }
23477 + } while (0);
23478 +
23479 + return 1; /* PaX in action */
23480 +}
23481 +
23482 +#ifdef CONFIG_X86_64
23483 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
23484 +{
23485 + int err;
23486 +
23487 + do { /* PaX: libffi trampoline emulation */
23488 + unsigned short mov1, mov2, jmp1;
23489 + unsigned char stcclc, jmp2;
23490 + unsigned long addr1, addr2;
23491 +
23492 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23493 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23494 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23495 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23496 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
23497 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
23498 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
23499 +
23500 + if (err)
23501 + break;
23502 +
23503 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23504 + regs->r11 = addr1;
23505 + regs->r10 = addr2;
23506 + if (stcclc == 0xF8)
23507 + regs->flags &= ~X86_EFLAGS_CF;
23508 + else
23509 + regs->flags |= X86_EFLAGS_CF;
23510 + regs->ip = addr1;
23511 + return 2;
23512 + }
23513 + } while (0);
23514 +
23515 + do { /* PaX: gcc trampoline emulation #1 */
23516 + unsigned short mov1, mov2, jmp1;
23517 + unsigned char jmp2;
23518 + unsigned int addr1;
23519 + unsigned long addr2;
23520 +
23521 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23522 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
23523 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
23524 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
23525 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
23526 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
23527 +
23528 + if (err)
23529 + break;
23530 +
23531 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23532 + regs->r11 = addr1;
23533 + regs->r10 = addr2;
23534 + regs->ip = addr1;
23535 + return 2;
23536 + }
23537 + } while (0);
23538 +
23539 + do { /* PaX: gcc trampoline emulation #2 */
23540 + unsigned short mov1, mov2, jmp1;
23541 + unsigned char jmp2;
23542 + unsigned long addr1, addr2;
23543 +
23544 + err = get_user(mov1, (unsigned short __user *)regs->ip);
23545 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23546 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23547 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23548 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
23549 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
23550 +
23551 + if (err)
23552 + break;
23553 +
23554 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23555 + regs->r11 = addr1;
23556 + regs->r10 = addr2;
23557 + regs->ip = addr1;
23558 + return 2;
23559 + }
23560 + } while (0);
23561 +
23562 + return 1; /* PaX in action */
23563 +}
23564 +#endif
23565 +
23566 +/*
23567 + * PaX: decide what to do with offenders (regs->ip = fault address)
23568 + *
23569 + * returns 1 when task should be killed
23570 + * 2 when gcc trampoline was detected
23571 + */
23572 +static int pax_handle_fetch_fault(struct pt_regs *regs)
23573 +{
23574 + if (v8086_mode(regs))
23575 + return 1;
23576 +
23577 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
23578 + return 1;
23579 +
23580 +#ifdef CONFIG_X86_32
23581 + return pax_handle_fetch_fault_32(regs);
23582 +#else
23583 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
23584 + return pax_handle_fetch_fault_32(regs);
23585 + else
23586 + return pax_handle_fetch_fault_64(regs);
23587 +#endif
23588 +}
23589 +#endif
23590 +
23591 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23592 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
23593 +{
23594 + long i;
23595 +
23596 + printk(KERN_ERR "PAX: bytes at PC: ");
23597 + for (i = 0; i < 20; i++) {
23598 + unsigned char c;
23599 + if (get_user(c, (unsigned char __force_user *)pc+i))
23600 + printk(KERN_CONT "?? ");
23601 + else
23602 + printk(KERN_CONT "%02x ", c);
23603 + }
23604 + printk("\n");
23605 +
23606 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
23607 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
23608 + unsigned long c;
23609 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
23610 +#ifdef CONFIG_X86_32
23611 + printk(KERN_CONT "???????? ");
23612 +#else
23613 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
23614 + printk(KERN_CONT "???????? ???????? ");
23615 + else
23616 + printk(KERN_CONT "???????????????? ");
23617 +#endif
23618 + } else {
23619 +#ifdef CONFIG_X86_64
23620 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
23621 + printk(KERN_CONT "%08x ", (unsigned int)c);
23622 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
23623 + } else
23624 +#endif
23625 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
23626 + }
23627 + }
23628 + printk("\n");
23629 +}
23630 +#endif
23631 +
23632 +/**
23633 + * probe_kernel_write(): safely attempt to write to a location
23634 + * @dst: address to write to
23635 + * @src: pointer to the data that shall be written
23636 + * @size: size of the data chunk
23637 + *
23638 + * Safely write to address @dst from the buffer at @src. If a kernel fault
23639 + * happens, handle that and return -EFAULT.
23640 + */
23641 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
23642 +{
23643 + long ret;
23644 + mm_segment_t old_fs = get_fs();
23645 +
23646 + set_fs(KERNEL_DS);
23647 + pagefault_disable();
23648 + pax_open_kernel();
23649 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
23650 + pax_close_kernel();
23651 + pagefault_enable();
23652 + set_fs(old_fs);
23653 +
23654 + return ret ? -EFAULT : 0;
23655 +}
23656 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
23657 index dd74e46..7d26398 100644
23658 --- a/arch/x86/mm/gup.c
23659 +++ b/arch/x86/mm/gup.c
23660 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
23661 addr = start;
23662 len = (unsigned long) nr_pages << PAGE_SHIFT;
23663 end = start + len;
23664 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23665 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
23666 (void __user *)start, len)))
23667 return 0;
23668
23669 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
23670 index f4f29b1..5cac4fb 100644
23671 --- a/arch/x86/mm/highmem_32.c
23672 +++ b/arch/x86/mm/highmem_32.c
23673 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
23674 idx = type + KM_TYPE_NR*smp_processor_id();
23675 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
23676 BUG_ON(!pte_none(*(kmap_pte-idx)));
23677 +
23678 + pax_open_kernel();
23679 set_pte(kmap_pte-idx, mk_pte(page, prot));
23680 + pax_close_kernel();
23681 +
23682 arch_flush_lazy_mmu_mode();
23683
23684 return (void *)vaddr;
23685 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
23686 index f581a18..29efd37 100644
23687 --- a/arch/x86/mm/hugetlbpage.c
23688 +++ b/arch/x86/mm/hugetlbpage.c
23689 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
23690 struct hstate *h = hstate_file(file);
23691 struct mm_struct *mm = current->mm;
23692 struct vm_area_struct *vma;
23693 - unsigned long start_addr;
23694 + unsigned long start_addr, pax_task_size = TASK_SIZE;
23695 +
23696 +#ifdef CONFIG_PAX_SEGMEXEC
23697 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23698 + pax_task_size = SEGMEXEC_TASK_SIZE;
23699 +#endif
23700 +
23701 + pax_task_size -= PAGE_SIZE;
23702
23703 if (len > mm->cached_hole_size) {
23704 - start_addr = mm->free_area_cache;
23705 + start_addr = mm->free_area_cache;
23706 } else {
23707 - start_addr = TASK_UNMAPPED_BASE;
23708 - mm->cached_hole_size = 0;
23709 + start_addr = mm->mmap_base;
23710 + mm->cached_hole_size = 0;
23711 }
23712
23713 full_search:
23714 @@ -280,26 +287,27 @@ full_search:
23715
23716 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
23717 /* At this point: (!vma || addr < vma->vm_end). */
23718 - if (TASK_SIZE - len < addr) {
23719 + if (pax_task_size - len < addr) {
23720 /*
23721 * Start a new search - just in case we missed
23722 * some holes.
23723 */
23724 - if (start_addr != TASK_UNMAPPED_BASE) {
23725 - start_addr = TASK_UNMAPPED_BASE;
23726 + if (start_addr != mm->mmap_base) {
23727 + start_addr = mm->mmap_base;
23728 mm->cached_hole_size = 0;
23729 goto full_search;
23730 }
23731 return -ENOMEM;
23732 }
23733 - if (!vma || addr + len <= vma->vm_start) {
23734 - mm->free_area_cache = addr + len;
23735 - return addr;
23736 - }
23737 + if (check_heap_stack_gap(vma, addr, len))
23738 + break;
23739 if (addr + mm->cached_hole_size < vma->vm_start)
23740 mm->cached_hole_size = vma->vm_start - addr;
23741 addr = ALIGN(vma->vm_end, huge_page_size(h));
23742 }
23743 +
23744 + mm->free_area_cache = addr + len;
23745 + return addr;
23746 }
23747
23748 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23749 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23750 {
23751 struct hstate *h = hstate_file(file);
23752 struct mm_struct *mm = current->mm;
23753 - struct vm_area_struct *vma, *prev_vma;
23754 - unsigned long base = mm->mmap_base, addr = addr0;
23755 + struct vm_area_struct *vma;
23756 + unsigned long base = mm->mmap_base, addr;
23757 unsigned long largest_hole = mm->cached_hole_size;
23758 - int first_time = 1;
23759
23760 /* don't allow allocations above current base */
23761 if (mm->free_area_cache > base)
23762 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
23763 largest_hole = 0;
23764 mm->free_area_cache = base;
23765 }
23766 -try_again:
23767 +
23768 /* make sure it can fit in the remaining address space */
23769 if (mm->free_area_cache < len)
23770 goto fail;
23771
23772 /* either no address requested or can't fit in requested address hole */
23773 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
23774 + addr = (mm->free_area_cache - len);
23775 do {
23776 + addr &= huge_page_mask(h);
23777 + vma = find_vma(mm, addr);
23778 /*
23779 * Lookup failure means no vma is above this address,
23780 * i.e. return with success:
23781 - */
23782 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
23783 - return addr;
23784 -
23785 - /*
23786 * new region fits between prev_vma->vm_end and
23787 * vma->vm_start, use it:
23788 */
23789 - if (addr + len <= vma->vm_start &&
23790 - (!prev_vma || (addr >= prev_vma->vm_end))) {
23791 + if (check_heap_stack_gap(vma, addr, len)) {
23792 /* remember the address as a hint for next time */
23793 - mm->cached_hole_size = largest_hole;
23794 - return (mm->free_area_cache = addr);
23795 - } else {
23796 - /* pull free_area_cache down to the first hole */
23797 - if (mm->free_area_cache == vma->vm_end) {
23798 - mm->free_area_cache = vma->vm_start;
23799 - mm->cached_hole_size = largest_hole;
23800 - }
23801 + mm->cached_hole_size = largest_hole;
23802 + return (mm->free_area_cache = addr);
23803 + }
23804 + /* pull free_area_cache down to the first hole */
23805 + if (mm->free_area_cache == vma->vm_end) {
23806 + mm->free_area_cache = vma->vm_start;
23807 + mm->cached_hole_size = largest_hole;
23808 }
23809
23810 /* remember the largest hole we saw so far */
23811 if (addr + largest_hole < vma->vm_start)
23812 - largest_hole = vma->vm_start - addr;
23813 + largest_hole = vma->vm_start - addr;
23814
23815 /* try just below the current vma->vm_start */
23816 - addr = (vma->vm_start - len) & huge_page_mask(h);
23817 - } while (len <= vma->vm_start);
23818 + addr = skip_heap_stack_gap(vma, len);
23819 + } while (!IS_ERR_VALUE(addr));
23820
23821 fail:
23822 /*
23823 - * if hint left us with no space for the requested
23824 - * mapping then try again:
23825 - */
23826 - if (first_time) {
23827 - mm->free_area_cache = base;
23828 - largest_hole = 0;
23829 - first_time = 0;
23830 - goto try_again;
23831 - }
23832 - /*
23833 * A failed mmap() very likely causes application failure,
23834 * so fall back to the bottom-up function here. This scenario
23835 * can happen with large stack limits and large mmap()
23836 * allocations.
23837 */
23838 - mm->free_area_cache = TASK_UNMAPPED_BASE;
23839 +
23840 +#ifdef CONFIG_PAX_SEGMEXEC
23841 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23842 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
23843 + else
23844 +#endif
23845 +
23846 + mm->mmap_base = TASK_UNMAPPED_BASE;
23847 +
23848 +#ifdef CONFIG_PAX_RANDMMAP
23849 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23850 + mm->mmap_base += mm->delta_mmap;
23851 +#endif
23852 +
23853 + mm->free_area_cache = mm->mmap_base;
23854 mm->cached_hole_size = ~0UL;
23855 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
23856 len, pgoff, flags);
23857 @@ -386,6 +392,7 @@ fail:
23858 /*
23859 * Restore the topdown base:
23860 */
23861 + mm->mmap_base = base;
23862 mm->free_area_cache = base;
23863 mm->cached_hole_size = ~0UL;
23864
23865 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23866 struct hstate *h = hstate_file(file);
23867 struct mm_struct *mm = current->mm;
23868 struct vm_area_struct *vma;
23869 + unsigned long pax_task_size = TASK_SIZE;
23870
23871 if (len & ~huge_page_mask(h))
23872 return -EINVAL;
23873 - if (len > TASK_SIZE)
23874 +
23875 +#ifdef CONFIG_PAX_SEGMEXEC
23876 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
23877 + pax_task_size = SEGMEXEC_TASK_SIZE;
23878 +#endif
23879 +
23880 + pax_task_size -= PAGE_SIZE;
23881 +
23882 + if (len > pax_task_size)
23883 return -ENOMEM;
23884
23885 if (flags & MAP_FIXED) {
23886 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
23887 if (addr) {
23888 addr = ALIGN(addr, huge_page_size(h));
23889 vma = find_vma(mm, addr);
23890 - if (TASK_SIZE - len >= addr &&
23891 - (!vma || addr + len <= vma->vm_start))
23892 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
23893 return addr;
23894 }
23895 if (mm->get_unmapped_area == arch_get_unmapped_area)
23896 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
23897 index 87488b9..399f416 100644
23898 --- a/arch/x86/mm/init.c
23899 +++ b/arch/x86/mm/init.c
23900 @@ -15,6 +15,7 @@
23901 #include <asm/tlbflush.h>
23902 #include <asm/tlb.h>
23903 #include <asm/proto.h>
23904 +#include <asm/desc.h>
23905
23906 unsigned long __initdata pgt_buf_start;
23907 unsigned long __meminitdata pgt_buf_end;
23908 @@ -31,7 +32,7 @@ int direct_gbpages
23909 static void __init find_early_table_space(unsigned long end, int use_pse,
23910 int use_gbpages)
23911 {
23912 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
23913 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
23914 phys_addr_t base;
23915
23916 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
23917 @@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
23918 */
23919 int devmem_is_allowed(unsigned long pagenr)
23920 {
23921 +#ifdef CONFIG_GRKERNSEC_KMEM
23922 + /* allow BDA */
23923 + if (!pagenr)
23924 + return 1;
23925 + /* allow EBDA */
23926 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
23927 + return 1;
23928 +#else
23929 + if (!pagenr)
23930 + return 1;
23931 +#ifdef CONFIG_VM86
23932 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
23933 + return 1;
23934 +#endif
23935 +#endif
23936 +
23937 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
23938 + return 1;
23939 +#ifdef CONFIG_GRKERNSEC_KMEM
23940 + /* throw out everything else below 1MB */
23941 if (pagenr <= 256)
23942 - return 1;
23943 + return 0;
23944 +#endif
23945 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
23946 return 0;
23947 if (!page_is_ram(pagenr))
23948 @@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
23949
23950 void free_initmem(void)
23951 {
23952 +
23953 +#ifdef CONFIG_PAX_KERNEXEC
23954 +#ifdef CONFIG_X86_32
23955 + /* PaX: limit KERNEL_CS to actual size */
23956 + unsigned long addr, limit;
23957 + struct desc_struct d;
23958 + int cpu;
23959 +
23960 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
23961 + limit = (limit - 1UL) >> PAGE_SHIFT;
23962 +
23963 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
23964 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
23965 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
23966 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
23967 + }
23968 +
23969 + /* PaX: make KERNEL_CS read-only */
23970 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
23971 + if (!paravirt_enabled())
23972 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
23973 +/*
23974 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
23975 + pgd = pgd_offset_k(addr);
23976 + pud = pud_offset(pgd, addr);
23977 + pmd = pmd_offset(pud, addr);
23978 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
23979 + }
23980 +*/
23981 +#ifdef CONFIG_X86_PAE
23982 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
23983 +/*
23984 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
23985 + pgd = pgd_offset_k(addr);
23986 + pud = pud_offset(pgd, addr);
23987 + pmd = pmd_offset(pud, addr);
23988 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
23989 + }
23990 +*/
23991 +#endif
23992 +
23993 +#ifdef CONFIG_MODULES
23994 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
23995 +#endif
23996 +
23997 +#else
23998 + pgd_t *pgd;
23999 + pud_t *pud;
24000 + pmd_t *pmd;
24001 + unsigned long addr, end;
24002 +
24003 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24004 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24005 + pgd = pgd_offset_k(addr);
24006 + pud = pud_offset(pgd, addr);
24007 + pmd = pmd_offset(pud, addr);
24008 + if (!pmd_present(*pmd))
24009 + continue;
24010 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24011 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24012 + else
24013 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24014 + }
24015 +
24016 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24017 + end = addr + KERNEL_IMAGE_SIZE;
24018 + for (; addr < end; addr += PMD_SIZE) {
24019 + pgd = pgd_offset_k(addr);
24020 + pud = pud_offset(pgd, addr);
24021 + pmd = pmd_offset(pud, addr);
24022 + if (!pmd_present(*pmd))
24023 + continue;
24024 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24025 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24026 + }
24027 +#endif
24028 +
24029 + flush_tlb_all();
24030 +#endif
24031 +
24032 free_init_pages("unused kernel memory",
24033 (unsigned long)(&__init_begin),
24034 (unsigned long)(&__init_end));
24035 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24036 index 29f7c6d..b46b35b 100644
24037 --- a/arch/x86/mm/init_32.c
24038 +++ b/arch/x86/mm/init_32.c
24039 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
24040 }
24041
24042 /*
24043 - * Creates a middle page table and puts a pointer to it in the
24044 - * given global directory entry. This only returns the gd entry
24045 - * in non-PAE compilation mode, since the middle layer is folded.
24046 - */
24047 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24048 -{
24049 - pud_t *pud;
24050 - pmd_t *pmd_table;
24051 -
24052 -#ifdef CONFIG_X86_PAE
24053 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24054 - if (after_bootmem)
24055 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24056 - else
24057 - pmd_table = (pmd_t *)alloc_low_page();
24058 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24059 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24060 - pud = pud_offset(pgd, 0);
24061 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24062 -
24063 - return pmd_table;
24064 - }
24065 -#endif
24066 - pud = pud_offset(pgd, 0);
24067 - pmd_table = pmd_offset(pud, 0);
24068 -
24069 - return pmd_table;
24070 -}
24071 -
24072 -/*
24073 * Create a page table and place a pointer to it in a middle page
24074 * directory entry:
24075 */
24076 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24077 page_table = (pte_t *)alloc_low_page();
24078
24079 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24080 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24081 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24082 +#else
24083 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24084 +#endif
24085 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24086 }
24087
24088 return pte_offset_kernel(pmd, 0);
24089 }
24090
24091 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24092 +{
24093 + pud_t *pud;
24094 + pmd_t *pmd_table;
24095 +
24096 + pud = pud_offset(pgd, 0);
24097 + pmd_table = pmd_offset(pud, 0);
24098 +
24099 + return pmd_table;
24100 +}
24101 +
24102 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24103 {
24104 int pgd_idx = pgd_index(vaddr);
24105 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24106 int pgd_idx, pmd_idx;
24107 unsigned long vaddr;
24108 pgd_t *pgd;
24109 + pud_t *pud;
24110 pmd_t *pmd;
24111 pte_t *pte = NULL;
24112
24113 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24114 pgd = pgd_base + pgd_idx;
24115
24116 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24117 - pmd = one_md_table_init(pgd);
24118 - pmd = pmd + pmd_index(vaddr);
24119 + pud = pud_offset(pgd, vaddr);
24120 + pmd = pmd_offset(pud, vaddr);
24121 +
24122 +#ifdef CONFIG_X86_PAE
24123 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24124 +#endif
24125 +
24126 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24127 pmd++, pmd_idx++) {
24128 pte = page_table_kmap_check(one_page_table_init(pmd),
24129 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24130 }
24131 }
24132
24133 -static inline int is_kernel_text(unsigned long addr)
24134 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24135 {
24136 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24137 - return 1;
24138 - return 0;
24139 + if ((start > ktla_ktva((unsigned long)_etext) ||
24140 + end <= ktla_ktva((unsigned long)_stext)) &&
24141 + (start > ktla_ktva((unsigned long)_einittext) ||
24142 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24143 +
24144 +#ifdef CONFIG_ACPI_SLEEP
24145 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24146 +#endif
24147 +
24148 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24149 + return 0;
24150 + return 1;
24151 }
24152
24153 /*
24154 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24155 unsigned long last_map_addr = end;
24156 unsigned long start_pfn, end_pfn;
24157 pgd_t *pgd_base = swapper_pg_dir;
24158 - int pgd_idx, pmd_idx, pte_ofs;
24159 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24160 unsigned long pfn;
24161 pgd_t *pgd;
24162 + pud_t *pud;
24163 pmd_t *pmd;
24164 pte_t *pte;
24165 unsigned pages_2m, pages_4k;
24166 @@ -281,8 +282,13 @@ repeat:
24167 pfn = start_pfn;
24168 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24169 pgd = pgd_base + pgd_idx;
24170 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24171 - pmd = one_md_table_init(pgd);
24172 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24173 + pud = pud_offset(pgd, 0);
24174 + pmd = pmd_offset(pud, 0);
24175 +
24176 +#ifdef CONFIG_X86_PAE
24177 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24178 +#endif
24179
24180 if (pfn >= end_pfn)
24181 continue;
24182 @@ -294,14 +300,13 @@ repeat:
24183 #endif
24184 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24185 pmd++, pmd_idx++) {
24186 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24187 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24188
24189 /*
24190 * Map with big pages if possible, otherwise
24191 * create normal page tables:
24192 */
24193 if (use_pse) {
24194 - unsigned int addr2;
24195 pgprot_t prot = PAGE_KERNEL_LARGE;
24196 /*
24197 * first pass will use the same initial
24198 @@ -311,11 +316,7 @@ repeat:
24199 __pgprot(PTE_IDENT_ATTR |
24200 _PAGE_PSE);
24201
24202 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24203 - PAGE_OFFSET + PAGE_SIZE-1;
24204 -
24205 - if (is_kernel_text(addr) ||
24206 - is_kernel_text(addr2))
24207 + if (is_kernel_text(address, address + PMD_SIZE))
24208 prot = PAGE_KERNEL_LARGE_EXEC;
24209
24210 pages_2m++;
24211 @@ -332,7 +333,7 @@ repeat:
24212 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24213 pte += pte_ofs;
24214 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24215 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24216 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24217 pgprot_t prot = PAGE_KERNEL;
24218 /*
24219 * first pass will use the same initial
24220 @@ -340,7 +341,7 @@ repeat:
24221 */
24222 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24223
24224 - if (is_kernel_text(addr))
24225 + if (is_kernel_text(address, address + PAGE_SIZE))
24226 prot = PAGE_KERNEL_EXEC;
24227
24228 pages_4k++;
24229 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24230
24231 pud = pud_offset(pgd, va);
24232 pmd = pmd_offset(pud, va);
24233 - if (!pmd_present(*pmd))
24234 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24235 break;
24236
24237 pte = pte_offset_kernel(pmd, va);
24238 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
24239
24240 static void __init pagetable_init(void)
24241 {
24242 - pgd_t *pgd_base = swapper_pg_dir;
24243 -
24244 - permanent_kmaps_init(pgd_base);
24245 + permanent_kmaps_init(swapper_pg_dir);
24246 }
24247
24248 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24249 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24250 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24251
24252 /* user-defined highmem size */
24253 @@ -757,6 +756,12 @@ void __init mem_init(void)
24254
24255 pci_iommu_alloc();
24256
24257 +#ifdef CONFIG_PAX_PER_CPU_PGD
24258 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24259 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24260 + KERNEL_PGD_PTRS);
24261 +#endif
24262 +
24263 #ifdef CONFIG_FLATMEM
24264 BUG_ON(!mem_map);
24265 #endif
24266 @@ -774,7 +779,7 @@ void __init mem_init(void)
24267 set_highmem_pages_init();
24268
24269 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24270 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24271 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24272 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24273
24274 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24275 @@ -815,10 +820,10 @@ void __init mem_init(void)
24276 ((unsigned long)&__init_end -
24277 (unsigned long)&__init_begin) >> 10,
24278
24279 - (unsigned long)&_etext, (unsigned long)&_edata,
24280 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24281 + (unsigned long)&_sdata, (unsigned long)&_edata,
24282 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24283
24284 - (unsigned long)&_text, (unsigned long)&_etext,
24285 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24286 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24287
24288 /*
24289 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
24290 if (!kernel_set_to_readonly)
24291 return;
24292
24293 + start = ktla_ktva(start);
24294 pr_debug("Set kernel text: %lx - %lx for read write\n",
24295 start, start+size);
24296
24297 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
24298 if (!kernel_set_to_readonly)
24299 return;
24300
24301 + start = ktla_ktva(start);
24302 pr_debug("Set kernel text: %lx - %lx for read only\n",
24303 start, start+size);
24304
24305 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
24306 unsigned long start = PFN_ALIGN(_text);
24307 unsigned long size = PFN_ALIGN(_etext) - start;
24308
24309 + start = ktla_ktva(start);
24310 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24311 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24312 size >> 10);
24313 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24314 index bbaaa00..796fa65 100644
24315 --- a/arch/x86/mm/init_64.c
24316 +++ b/arch/x86/mm/init_64.c
24317 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24318 * around without checking the pgd every time.
24319 */
24320
24321 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24322 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24323 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24324
24325 int force_personality32;
24326 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24327
24328 for (address = start; address <= end; address += PGDIR_SIZE) {
24329 const pgd_t *pgd_ref = pgd_offset_k(address);
24330 +
24331 +#ifdef CONFIG_PAX_PER_CPU_PGD
24332 + unsigned long cpu;
24333 +#else
24334 struct page *page;
24335 +#endif
24336
24337 if (pgd_none(*pgd_ref))
24338 continue;
24339
24340 spin_lock(&pgd_lock);
24341 +
24342 +#ifdef CONFIG_PAX_PER_CPU_PGD
24343 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24344 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
24345 +#else
24346 list_for_each_entry(page, &pgd_list, lru) {
24347 pgd_t *pgd;
24348 spinlock_t *pgt_lock;
24349 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24350 /* the pgt_lock only for Xen */
24351 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24352 spin_lock(pgt_lock);
24353 +#endif
24354
24355 if (pgd_none(*pgd))
24356 set_pgd(pgd, *pgd_ref);
24357 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24358 BUG_ON(pgd_page_vaddr(*pgd)
24359 != pgd_page_vaddr(*pgd_ref));
24360
24361 +#ifndef CONFIG_PAX_PER_CPU_PGD
24362 spin_unlock(pgt_lock);
24363 +#endif
24364 +
24365 }
24366 spin_unlock(&pgd_lock);
24367 }
24368 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24369 pmd = fill_pmd(pud, vaddr);
24370 pte = fill_pte(pmd, vaddr);
24371
24372 + pax_open_kernel();
24373 set_pte(pte, new_pte);
24374 + pax_close_kernel();
24375
24376 /*
24377 * It's enough to flush this one mapping.
24378 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24379 pgd = pgd_offset_k((unsigned long)__va(phys));
24380 if (pgd_none(*pgd)) {
24381 pud = (pud_t *) spp_getpage();
24382 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24383 - _PAGE_USER));
24384 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24385 }
24386 pud = pud_offset(pgd, (unsigned long)__va(phys));
24387 if (pud_none(*pud)) {
24388 pmd = (pmd_t *) spp_getpage();
24389 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24390 - _PAGE_USER));
24391 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24392 }
24393 pmd = pmd_offset(pud, phys);
24394 BUG_ON(!pmd_none(*pmd));
24395 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
24396 if (pfn >= pgt_buf_top)
24397 panic("alloc_low_page: ran out of memory");
24398
24399 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24400 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24401 clear_page(adr);
24402 *phys = pfn * PAGE_SIZE;
24403 return adr;
24404 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
24405
24406 phys = __pa(virt);
24407 left = phys & (PAGE_SIZE - 1);
24408 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24409 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24410 adr = (void *)(((unsigned long)adr) | left);
24411
24412 return adr;
24413 @@ -693,6 +707,12 @@ void __init mem_init(void)
24414
24415 pci_iommu_alloc();
24416
24417 +#ifdef CONFIG_PAX_PER_CPU_PGD
24418 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24419 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24420 + KERNEL_PGD_PTRS);
24421 +#endif
24422 +
24423 /* clear_bss() already clear the empty_zero_page */
24424
24425 reservedpages = 0;
24426 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
24427 static struct vm_area_struct gate_vma = {
24428 .vm_start = VSYSCALL_START,
24429 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
24430 - .vm_page_prot = PAGE_READONLY_EXEC,
24431 - .vm_flags = VM_READ | VM_EXEC
24432 + .vm_page_prot = PAGE_READONLY,
24433 + .vm_flags = VM_READ
24434 };
24435
24436 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24437 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
24438
24439 const char *arch_vma_name(struct vm_area_struct *vma)
24440 {
24441 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24442 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24443 return "[vdso]";
24444 if (vma == &gate_vma)
24445 return "[vsyscall]";
24446 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
24447 index 7b179b4..6bd1777 100644
24448 --- a/arch/x86/mm/iomap_32.c
24449 +++ b/arch/x86/mm/iomap_32.c
24450 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
24451 type = kmap_atomic_idx_push();
24452 idx = type + KM_TYPE_NR * smp_processor_id();
24453 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24454 +
24455 + pax_open_kernel();
24456 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
24457 + pax_close_kernel();
24458 +
24459 arch_flush_lazy_mmu_mode();
24460
24461 return (void *)vaddr;
24462 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
24463 index be1ef57..55f0160 100644
24464 --- a/arch/x86/mm/ioremap.c
24465 +++ b/arch/x86/mm/ioremap.c
24466 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
24467 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
24468 int is_ram = page_is_ram(pfn);
24469
24470 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
24471 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
24472 return NULL;
24473 WARN_ON_ONCE(is_ram);
24474 }
24475 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
24476
24477 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
24478 if (page_is_ram(start >> PAGE_SHIFT))
24479 +#ifdef CONFIG_HIGHMEM
24480 + if ((start >> PAGE_SHIFT) < max_low_pfn)
24481 +#endif
24482 return __va(phys);
24483
24484 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
24485 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
24486 early_param("early_ioremap_debug", early_ioremap_debug_setup);
24487
24488 static __initdata int after_paging_init;
24489 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
24490 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
24491
24492 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
24493 {
24494 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
24495 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
24496
24497 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
24498 - memset(bm_pte, 0, sizeof(bm_pte));
24499 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
24500 + pmd_populate_user(&init_mm, pmd, bm_pte);
24501
24502 /*
24503 * The boot-ioremap range spans multiple pmds, for which
24504 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
24505 index d87dd6d..bf3fa66 100644
24506 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
24507 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
24508 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
24509 * memory (e.g. tracked pages)? For now, we need this to avoid
24510 * invoking kmemcheck for PnP BIOS calls.
24511 */
24512 - if (regs->flags & X86_VM_MASK)
24513 + if (v8086_mode(regs))
24514 return false;
24515 - if (regs->cs != __KERNEL_CS)
24516 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
24517 return false;
24518
24519 pte = kmemcheck_pte_lookup(address);
24520 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
24521 index 845df68..1d8d29f 100644
24522 --- a/arch/x86/mm/mmap.c
24523 +++ b/arch/x86/mm/mmap.c
24524 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
24525 * Leave an at least ~128 MB hole with possible stack randomization.
24526 */
24527 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
24528 -#define MAX_GAP (TASK_SIZE/6*5)
24529 +#define MAX_GAP (pax_task_size/6*5)
24530
24531 static int mmap_is_legacy(void)
24532 {
24533 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
24534 return rnd << PAGE_SHIFT;
24535 }
24536
24537 -static unsigned long mmap_base(void)
24538 +static unsigned long mmap_base(struct mm_struct *mm)
24539 {
24540 unsigned long gap = rlimit(RLIMIT_STACK);
24541 + unsigned long pax_task_size = TASK_SIZE;
24542 +
24543 +#ifdef CONFIG_PAX_SEGMEXEC
24544 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24545 + pax_task_size = SEGMEXEC_TASK_SIZE;
24546 +#endif
24547
24548 if (gap < MIN_GAP)
24549 gap = MIN_GAP;
24550 else if (gap > MAX_GAP)
24551 gap = MAX_GAP;
24552
24553 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
24554 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
24555 }
24556
24557 /*
24558 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
24559 * does, but not when emulating X86_32
24560 */
24561 -static unsigned long mmap_legacy_base(void)
24562 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
24563 {
24564 - if (mmap_is_ia32())
24565 + if (mmap_is_ia32()) {
24566 +
24567 +#ifdef CONFIG_PAX_SEGMEXEC
24568 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24569 + return SEGMEXEC_TASK_UNMAPPED_BASE;
24570 + else
24571 +#endif
24572 +
24573 return TASK_UNMAPPED_BASE;
24574 - else
24575 + } else
24576 return TASK_UNMAPPED_BASE + mmap_rnd();
24577 }
24578
24579 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
24580 void arch_pick_mmap_layout(struct mm_struct *mm)
24581 {
24582 if (mmap_is_legacy()) {
24583 - mm->mmap_base = mmap_legacy_base();
24584 + mm->mmap_base = mmap_legacy_base(mm);
24585 +
24586 +#ifdef CONFIG_PAX_RANDMMAP
24587 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24588 + mm->mmap_base += mm->delta_mmap;
24589 +#endif
24590 +
24591 mm->get_unmapped_area = arch_get_unmapped_area;
24592 mm->unmap_area = arch_unmap_area;
24593 } else {
24594 - mm->mmap_base = mmap_base();
24595 + mm->mmap_base = mmap_base(mm);
24596 +
24597 +#ifdef CONFIG_PAX_RANDMMAP
24598 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24599 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
24600 +#endif
24601 +
24602 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
24603 mm->unmap_area = arch_unmap_area_topdown;
24604 }
24605 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
24606 index de54b9b..799051e 100644
24607 --- a/arch/x86/mm/mmio-mod.c
24608 +++ b/arch/x86/mm/mmio-mod.c
24609 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
24610 break;
24611 default:
24612 {
24613 - unsigned char *ip = (unsigned char *)instptr;
24614 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
24615 my_trace->opcode = MMIO_UNKNOWN_OP;
24616 my_trace->width = 0;
24617 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
24618 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
24619 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24620 void __iomem *addr)
24621 {
24622 - static atomic_t next_id;
24623 + static atomic_unchecked_t next_id;
24624 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
24625 /* These are page-unaligned. */
24626 struct mmiotrace_map map = {
24627 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
24628 .private = trace
24629 },
24630 .phys = offset,
24631 - .id = atomic_inc_return(&next_id)
24632 + .id = atomic_inc_return_unchecked(&next_id)
24633 };
24634 map.map_id = trace->id;
24635
24636 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
24637 index b008656..773eac2 100644
24638 --- a/arch/x86/mm/pageattr-test.c
24639 +++ b/arch/x86/mm/pageattr-test.c
24640 @@ -36,7 +36,7 @@ enum {
24641
24642 static int pte_testbit(pte_t pte)
24643 {
24644 - return pte_flags(pte) & _PAGE_UNUSED1;
24645 + return pte_flags(pte) & _PAGE_CPA_TEST;
24646 }
24647
24648 struct split_state {
24649 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
24650 index f9e5267..77b1a40 100644
24651 --- a/arch/x86/mm/pageattr.c
24652 +++ b/arch/x86/mm/pageattr.c
24653 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24654 */
24655 #ifdef CONFIG_PCI_BIOS
24656 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
24657 - pgprot_val(forbidden) |= _PAGE_NX;
24658 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24659 #endif
24660
24661 /*
24662 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24663 * Does not cover __inittext since that is gone later on. On
24664 * 64bit we do not enforce !NX on the low mapping
24665 */
24666 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
24667 - pgprot_val(forbidden) |= _PAGE_NX;
24668 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
24669 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24670
24671 +#ifdef CONFIG_DEBUG_RODATA
24672 /*
24673 * The .rodata section needs to be read-only. Using the pfn
24674 * catches all aliases.
24675 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24676 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
24677 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
24678 pgprot_val(forbidden) |= _PAGE_RW;
24679 +#endif
24680
24681 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
24682 /*
24683 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
24684 }
24685 #endif
24686
24687 +#ifdef CONFIG_PAX_KERNEXEC
24688 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
24689 + pgprot_val(forbidden) |= _PAGE_RW;
24690 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
24691 + }
24692 +#endif
24693 +
24694 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
24695
24696 return prot;
24697 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
24698 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
24699 {
24700 /* change init_mm */
24701 + pax_open_kernel();
24702 set_pte_atomic(kpte, pte);
24703 +
24704 #ifdef CONFIG_X86_32
24705 if (!SHARED_KERNEL_PMD) {
24706 +
24707 +#ifdef CONFIG_PAX_PER_CPU_PGD
24708 + unsigned long cpu;
24709 +#else
24710 struct page *page;
24711 +#endif
24712
24713 +#ifdef CONFIG_PAX_PER_CPU_PGD
24714 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24715 + pgd_t *pgd = get_cpu_pgd(cpu);
24716 +#else
24717 list_for_each_entry(page, &pgd_list, lru) {
24718 - pgd_t *pgd;
24719 + pgd_t *pgd = (pgd_t *)page_address(page);
24720 +#endif
24721 +
24722 pud_t *pud;
24723 pmd_t *pmd;
24724
24725 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
24726 + pgd += pgd_index(address);
24727 pud = pud_offset(pgd, address);
24728 pmd = pmd_offset(pud, address);
24729 set_pte_atomic((pte_t *)pmd, pte);
24730 }
24731 }
24732 #endif
24733 + pax_close_kernel();
24734 }
24735
24736 static int
24737 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
24738 index f6ff57b..481690f 100644
24739 --- a/arch/x86/mm/pat.c
24740 +++ b/arch/x86/mm/pat.c
24741 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
24742
24743 if (!entry) {
24744 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
24745 - current->comm, current->pid, start, end);
24746 + current->comm, task_pid_nr(current), start, end);
24747 return -EINVAL;
24748 }
24749
24750 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
24751 while (cursor < to) {
24752 if (!devmem_is_allowed(pfn)) {
24753 printk(KERN_INFO
24754 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
24755 - current->comm, from, to);
24756 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
24757 + current->comm, from, to, cursor);
24758 return 0;
24759 }
24760 cursor += PAGE_SIZE;
24761 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
24762 printk(KERN_INFO
24763 "%s:%d ioremap_change_attr failed %s "
24764 "for %Lx-%Lx\n",
24765 - current->comm, current->pid,
24766 + current->comm, task_pid_nr(current),
24767 cattr_name(flags),
24768 base, (unsigned long long)(base + size));
24769 return -EINVAL;
24770 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24771 if (want_flags != flags) {
24772 printk(KERN_WARNING
24773 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
24774 - current->comm, current->pid,
24775 + current->comm, task_pid_nr(current),
24776 cattr_name(want_flags),
24777 (unsigned long long)paddr,
24778 (unsigned long long)(paddr + size),
24779 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
24780 free_memtype(paddr, paddr + size);
24781 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
24782 " for %Lx-%Lx, got %s\n",
24783 - current->comm, current->pid,
24784 + current->comm, task_pid_nr(current),
24785 cattr_name(want_flags),
24786 (unsigned long long)paddr,
24787 (unsigned long long)(paddr + size),
24788 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
24789 index 9f0614d..92ae64a 100644
24790 --- a/arch/x86/mm/pf_in.c
24791 +++ b/arch/x86/mm/pf_in.c
24792 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
24793 int i;
24794 enum reason_type rv = OTHERS;
24795
24796 - p = (unsigned char *)ins_addr;
24797 + p = (unsigned char *)ktla_ktva(ins_addr);
24798 p += skip_prefix(p, &prf);
24799 p += get_opcode(p, &opcode);
24800
24801 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
24802 struct prefix_bits prf;
24803 int i;
24804
24805 - p = (unsigned char *)ins_addr;
24806 + p = (unsigned char *)ktla_ktva(ins_addr);
24807 p += skip_prefix(p, &prf);
24808 p += get_opcode(p, &opcode);
24809
24810 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
24811 struct prefix_bits prf;
24812 int i;
24813
24814 - p = (unsigned char *)ins_addr;
24815 + p = (unsigned char *)ktla_ktva(ins_addr);
24816 p += skip_prefix(p, &prf);
24817 p += get_opcode(p, &opcode);
24818
24819 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
24820 struct prefix_bits prf;
24821 int i;
24822
24823 - p = (unsigned char *)ins_addr;
24824 + p = (unsigned char *)ktla_ktva(ins_addr);
24825 p += skip_prefix(p, &prf);
24826 p += get_opcode(p, &opcode);
24827 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
24828 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
24829 struct prefix_bits prf;
24830 int i;
24831
24832 - p = (unsigned char *)ins_addr;
24833 + p = (unsigned char *)ktla_ktva(ins_addr);
24834 p += skip_prefix(p, &prf);
24835 p += get_opcode(p, &opcode);
24836 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
24837 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
24838 index 8573b83..c3b1a30 100644
24839 --- a/arch/x86/mm/pgtable.c
24840 +++ b/arch/x86/mm/pgtable.c
24841 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
24842 list_del(&page->lru);
24843 }
24844
24845 -#define UNSHARED_PTRS_PER_PGD \
24846 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24847 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24848 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
24849
24850 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24851 +{
24852 + while (count--)
24853 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
24854 +}
24855 +#endif
24856
24857 +#ifdef CONFIG_PAX_PER_CPU_PGD
24858 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
24859 +{
24860 + while (count--)
24861 +
24862 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24863 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
24864 +#else
24865 + *dst++ = *src++;
24866 +#endif
24867 +
24868 +}
24869 +#endif
24870 +
24871 +#ifdef CONFIG_X86_64
24872 +#define pxd_t pud_t
24873 +#define pyd_t pgd_t
24874 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
24875 +#define pxd_free(mm, pud) pud_free((mm), (pud))
24876 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
24877 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
24878 +#define PYD_SIZE PGDIR_SIZE
24879 +#else
24880 +#define pxd_t pmd_t
24881 +#define pyd_t pud_t
24882 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
24883 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
24884 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
24885 +#define pyd_offset(mm, address) pud_offset((mm), (address))
24886 +#define PYD_SIZE PUD_SIZE
24887 +#endif
24888 +
24889 +#ifdef CONFIG_PAX_PER_CPU_PGD
24890 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
24891 +static inline void pgd_dtor(pgd_t *pgd) {}
24892 +#else
24893 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
24894 {
24895 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
24896 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
24897 pgd_list_del(pgd);
24898 spin_unlock(&pgd_lock);
24899 }
24900 +#endif
24901
24902 /*
24903 * List of all pgd's needed for non-PAE so it can invalidate entries
24904 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
24905 * -- wli
24906 */
24907
24908 -#ifdef CONFIG_X86_PAE
24909 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
24910 /*
24911 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
24912 * updating the top-level pagetable entries to guarantee the
24913 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
24914 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
24915 * and initialize the kernel pmds here.
24916 */
24917 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
24918 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
24919
24920 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24921 {
24922 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
24923 */
24924 flush_tlb_mm(mm);
24925 }
24926 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
24927 +#define PREALLOCATED_PXDS USER_PGD_PTRS
24928 #else /* !CONFIG_X86_PAE */
24929
24930 /* No need to prepopulate any pagetable entries in non-PAE modes. */
24931 -#define PREALLOCATED_PMDS 0
24932 +#define PREALLOCATED_PXDS 0
24933
24934 #endif /* CONFIG_X86_PAE */
24935
24936 -static void free_pmds(pmd_t *pmds[])
24937 +static void free_pxds(pxd_t *pxds[])
24938 {
24939 int i;
24940
24941 - for(i = 0; i < PREALLOCATED_PMDS; i++)
24942 - if (pmds[i])
24943 - free_page((unsigned long)pmds[i]);
24944 + for(i = 0; i < PREALLOCATED_PXDS; i++)
24945 + if (pxds[i])
24946 + free_page((unsigned long)pxds[i]);
24947 }
24948
24949 -static int preallocate_pmds(pmd_t *pmds[])
24950 +static int preallocate_pxds(pxd_t *pxds[])
24951 {
24952 int i;
24953 bool failed = false;
24954
24955 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
24956 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
24957 - if (pmd == NULL)
24958 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
24959 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
24960 + if (pxd == NULL)
24961 failed = true;
24962 - pmds[i] = pmd;
24963 + pxds[i] = pxd;
24964 }
24965
24966 if (failed) {
24967 - free_pmds(pmds);
24968 + free_pxds(pxds);
24969 return -ENOMEM;
24970 }
24971
24972 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
24973 * preallocate which never got a corresponding vma will need to be
24974 * freed manually.
24975 */
24976 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
24977 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
24978 {
24979 int i;
24980
24981 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
24982 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
24983 pgd_t pgd = pgdp[i];
24984
24985 if (pgd_val(pgd) != 0) {
24986 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
24987 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
24988
24989 - pgdp[i] = native_make_pgd(0);
24990 + set_pgd(pgdp + i, native_make_pgd(0));
24991
24992 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
24993 - pmd_free(mm, pmd);
24994 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
24995 + pxd_free(mm, pxd);
24996 }
24997 }
24998 }
24999
25000 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25001 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25002 {
25003 - pud_t *pud;
25004 + pyd_t *pyd;
25005 unsigned long addr;
25006 int i;
25007
25008 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25009 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25010 return;
25011
25012 - pud = pud_offset(pgd, 0);
25013 +#ifdef CONFIG_X86_64
25014 + pyd = pyd_offset(mm, 0L);
25015 +#else
25016 + pyd = pyd_offset(pgd, 0L);
25017 +#endif
25018
25019 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25020 - i++, pud++, addr += PUD_SIZE) {
25021 - pmd_t *pmd = pmds[i];
25022 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25023 + i++, pyd++, addr += PYD_SIZE) {
25024 + pxd_t *pxd = pxds[i];
25025
25026 if (i >= KERNEL_PGD_BOUNDARY)
25027 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25028 - sizeof(pmd_t) * PTRS_PER_PMD);
25029 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25030 + sizeof(pxd_t) * PTRS_PER_PMD);
25031
25032 - pud_populate(mm, pud, pmd);
25033 + pyd_populate(mm, pyd, pxd);
25034 }
25035 }
25036
25037 pgd_t *pgd_alloc(struct mm_struct *mm)
25038 {
25039 pgd_t *pgd;
25040 - pmd_t *pmds[PREALLOCATED_PMDS];
25041 + pxd_t *pxds[PREALLOCATED_PXDS];
25042
25043 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25044
25045 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25046
25047 mm->pgd = pgd;
25048
25049 - if (preallocate_pmds(pmds) != 0)
25050 + if (preallocate_pxds(pxds) != 0)
25051 goto out_free_pgd;
25052
25053 if (paravirt_pgd_alloc(mm) != 0)
25054 - goto out_free_pmds;
25055 + goto out_free_pxds;
25056
25057 /*
25058 * Make sure that pre-populating the pmds is atomic with
25059 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25060 spin_lock(&pgd_lock);
25061
25062 pgd_ctor(mm, pgd);
25063 - pgd_prepopulate_pmd(mm, pgd, pmds);
25064 + pgd_prepopulate_pxd(mm, pgd, pxds);
25065
25066 spin_unlock(&pgd_lock);
25067
25068 return pgd;
25069
25070 -out_free_pmds:
25071 - free_pmds(pmds);
25072 +out_free_pxds:
25073 + free_pxds(pxds);
25074 out_free_pgd:
25075 free_page((unsigned long)pgd);
25076 out:
25077 @@ -295,7 +344,7 @@ out:
25078
25079 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25080 {
25081 - pgd_mop_up_pmds(mm, pgd);
25082 + pgd_mop_up_pxds(mm, pgd);
25083 pgd_dtor(pgd);
25084 paravirt_pgd_free(mm, pgd);
25085 free_page((unsigned long)pgd);
25086 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25087 index cac7184..09a39fa 100644
25088 --- a/arch/x86/mm/pgtable_32.c
25089 +++ b/arch/x86/mm/pgtable_32.c
25090 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25091 return;
25092 }
25093 pte = pte_offset_kernel(pmd, vaddr);
25094 +
25095 + pax_open_kernel();
25096 if (pte_val(pteval))
25097 set_pte_at(&init_mm, vaddr, pte, pteval);
25098 else
25099 pte_clear(&init_mm, vaddr, pte);
25100 + pax_close_kernel();
25101
25102 /*
25103 * It's enough to flush this one mapping.
25104 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25105 index 410531d..0f16030 100644
25106 --- a/arch/x86/mm/setup_nx.c
25107 +++ b/arch/x86/mm/setup_nx.c
25108 @@ -5,8 +5,10 @@
25109 #include <asm/pgtable.h>
25110 #include <asm/proto.h>
25111
25112 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25113 static int disable_nx __cpuinitdata;
25114
25115 +#ifndef CONFIG_PAX_PAGEEXEC
25116 /*
25117 * noexec = on|off
25118 *
25119 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25120 return 0;
25121 }
25122 early_param("noexec", noexec_setup);
25123 +#endif
25124 +
25125 +#endif
25126
25127 void __cpuinit x86_configure_nx(void)
25128 {
25129 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25130 if (cpu_has_nx && !disable_nx)
25131 __supported_pte_mask |= _PAGE_NX;
25132 else
25133 +#endif
25134 __supported_pte_mask &= ~_PAGE_NX;
25135 }
25136
25137 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25138 index d6c0418..06a0ad5 100644
25139 --- a/arch/x86/mm/tlb.c
25140 +++ b/arch/x86/mm/tlb.c
25141 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25142 BUG();
25143 cpumask_clear_cpu(cpu,
25144 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25145 +
25146 +#ifndef CONFIG_PAX_PER_CPU_PGD
25147 load_cr3(swapper_pg_dir);
25148 +#endif
25149 +
25150 }
25151 EXPORT_SYMBOL_GPL(leave_mm);
25152
25153 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25154 index 6687022..ceabcfa 100644
25155 --- a/arch/x86/net/bpf_jit.S
25156 +++ b/arch/x86/net/bpf_jit.S
25157 @@ -9,6 +9,7 @@
25158 */
25159 #include <linux/linkage.h>
25160 #include <asm/dwarf2.h>
25161 +#include <asm/alternative-asm.h>
25162
25163 /*
25164 * Calling convention :
25165 @@ -35,6 +36,7 @@ sk_load_word:
25166 jle bpf_slow_path_word
25167 mov (SKBDATA,%rsi),%eax
25168 bswap %eax /* ntohl() */
25169 + pax_force_retaddr
25170 ret
25171
25172
25173 @@ -53,6 +55,7 @@ sk_load_half:
25174 jle bpf_slow_path_half
25175 movzwl (SKBDATA,%rsi),%eax
25176 rol $8,%ax # ntohs()
25177 + pax_force_retaddr
25178 ret
25179
25180 sk_load_byte_ind:
25181 @@ -66,6 +69,7 @@ sk_load_byte:
25182 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25183 jle bpf_slow_path_byte
25184 movzbl (SKBDATA,%rsi),%eax
25185 + pax_force_retaddr
25186 ret
25187
25188 /**
25189 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
25190 movzbl (SKBDATA,%rsi),%ebx
25191 and $15,%bl
25192 shl $2,%bl
25193 + pax_force_retaddr
25194 ret
25195 CFI_ENDPROC
25196 ENDPROC(sk_load_byte_msh)
25197 @@ -91,6 +96,7 @@ bpf_error:
25198 xor %eax,%eax
25199 mov -8(%rbp),%rbx
25200 leaveq
25201 + pax_force_retaddr
25202 ret
25203
25204 /* rsi contains offset and can be scratched */
25205 @@ -113,6 +119,7 @@ bpf_slow_path_word:
25206 js bpf_error
25207 mov -12(%rbp),%eax
25208 bswap %eax
25209 + pax_force_retaddr
25210 ret
25211
25212 bpf_slow_path_half:
25213 @@ -121,12 +128,14 @@ bpf_slow_path_half:
25214 mov -12(%rbp),%ax
25215 rol $8,%ax
25216 movzwl %ax,%eax
25217 + pax_force_retaddr
25218 ret
25219
25220 bpf_slow_path_byte:
25221 bpf_slow_path_common(1)
25222 js bpf_error
25223 movzbl -12(%rbp),%eax
25224 + pax_force_retaddr
25225 ret
25226
25227 bpf_slow_path_byte_msh:
25228 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
25229 and $15,%al
25230 shl $2,%al
25231 xchg %eax,%ebx
25232 + pax_force_retaddr
25233 ret
25234 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25235 index 7c1b765..8c072c6 100644
25236 --- a/arch/x86/net/bpf_jit_comp.c
25237 +++ b/arch/x86/net/bpf_jit_comp.c
25238 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
25239 set_fs(old_fs);
25240 }
25241
25242 +struct bpf_jit_work {
25243 + struct work_struct work;
25244 + void *image;
25245 +};
25246
25247 void bpf_jit_compile(struct sk_filter *fp)
25248 {
25249 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25250 if (addrs == NULL)
25251 return;
25252
25253 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25254 + if (!fp->work)
25255 + goto out;
25256 +
25257 /* Before first pass, make a rough estimation of addrs[]
25258 * each bpf instruction is translated to less than 64 bytes
25259 */
25260 @@ -476,7 +484,7 @@ void bpf_jit_compile(struct sk_filter *fp)
25261 func = sk_load_word;
25262 common_load: seen |= SEEN_DATAREF;
25263 if ((int)K < 0)
25264 - goto out;
25265 + goto error;
25266 t_offset = func - (image + addrs[i]);
25267 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
25268 EMIT1_off32(0xe8, t_offset); /* call */
25269 @@ -586,17 +594,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25270 break;
25271 default:
25272 /* hmm, too complex filter, give up with jit compiler */
25273 - goto out;
25274 + goto error;
25275 }
25276 ilen = prog - temp;
25277 if (image) {
25278 if (unlikely(proglen + ilen > oldproglen)) {
25279 pr_err("bpb_jit_compile fatal error\n");
25280 - kfree(addrs);
25281 - module_free(NULL, image);
25282 - return;
25283 + module_free_exec(NULL, image);
25284 + goto error;
25285 }
25286 + pax_open_kernel();
25287 memcpy(image + proglen, temp, ilen);
25288 + pax_close_kernel();
25289 }
25290 proglen += ilen;
25291 addrs[i] = proglen;
25292 @@ -617,11 +626,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25293 break;
25294 }
25295 if (proglen == oldproglen) {
25296 - image = module_alloc(max_t(unsigned int,
25297 - proglen,
25298 - sizeof(struct work_struct)));
25299 + image = module_alloc_exec(proglen);
25300 if (!image)
25301 - goto out;
25302 + goto error;
25303 }
25304 oldproglen = proglen;
25305 }
25306 @@ -637,7 +644,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25307 bpf_flush_icache(image, image + proglen);
25308
25309 fp->bpf_func = (void *)image;
25310 - }
25311 + } else
25312 +error:
25313 + kfree(fp->work);
25314 +
25315 out:
25316 kfree(addrs);
25317 return;
25318 @@ -645,18 +655,20 @@ out:
25319
25320 static void jit_free_defer(struct work_struct *arg)
25321 {
25322 - module_free(NULL, arg);
25323 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25324 + kfree(arg);
25325 }
25326
25327 /* run from softirq, we must use a work_struct to call
25328 - * module_free() from process context
25329 + * module_free_exec() from process context
25330 */
25331 void bpf_jit_free(struct sk_filter *fp)
25332 {
25333 if (fp->bpf_func != sk_run_filter) {
25334 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
25335 + struct work_struct *work = &fp->work->work;
25336
25337 INIT_WORK(work, jit_free_defer);
25338 + fp->work->image = fp->bpf_func;
25339 schedule_work(work);
25340 }
25341 }
25342 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25343 index bff89df..377758a 100644
25344 --- a/arch/x86/oprofile/backtrace.c
25345 +++ b/arch/x86/oprofile/backtrace.c
25346 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
25347 struct stack_frame_ia32 *fp;
25348 unsigned long bytes;
25349
25350 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25351 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25352 if (bytes != sizeof(bufhead))
25353 return NULL;
25354
25355 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
25356 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
25357
25358 oprofile_add_trace(bufhead[0].return_address);
25359
25360 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
25361 struct stack_frame bufhead[2];
25362 unsigned long bytes;
25363
25364 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25365 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25366 if (bytes != sizeof(bufhead))
25367 return NULL;
25368
25369 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25370 {
25371 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
25372
25373 - if (!user_mode_vm(regs)) {
25374 + if (!user_mode(regs)) {
25375 unsigned long stack = kernel_stack_pointer(regs);
25376 if (depth)
25377 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25378 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
25379 index cb29191..036766d 100644
25380 --- a/arch/x86/pci/mrst.c
25381 +++ b/arch/x86/pci/mrst.c
25382 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
25383 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
25384 pci_mmcfg_late_init();
25385 pcibios_enable_irq = mrst_pci_irq_enable;
25386 - pci_root_ops = pci_mrst_ops;
25387 + pax_open_kernel();
25388 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
25389 + pax_close_kernel();
25390 /* Continue with standard init */
25391 return 1;
25392 }
25393 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
25394 index db0e9a5..0372c14 100644
25395 --- a/arch/x86/pci/pcbios.c
25396 +++ b/arch/x86/pci/pcbios.c
25397 @@ -79,50 +79,93 @@ union bios32 {
25398 static struct {
25399 unsigned long address;
25400 unsigned short segment;
25401 -} bios32_indirect = { 0, __KERNEL_CS };
25402 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
25403
25404 /*
25405 * Returns the entry point for the given service, NULL on error
25406 */
25407
25408 -static unsigned long bios32_service(unsigned long service)
25409 +static unsigned long __devinit bios32_service(unsigned long service)
25410 {
25411 unsigned char return_code; /* %al */
25412 unsigned long address; /* %ebx */
25413 unsigned long length; /* %ecx */
25414 unsigned long entry; /* %edx */
25415 unsigned long flags;
25416 + struct desc_struct d, *gdt;
25417
25418 local_irq_save(flags);
25419 - __asm__("lcall *(%%edi); cld"
25420 +
25421 + gdt = get_cpu_gdt_table(smp_processor_id());
25422 +
25423 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
25424 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25425 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
25426 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25427 +
25428 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
25429 : "=a" (return_code),
25430 "=b" (address),
25431 "=c" (length),
25432 "=d" (entry)
25433 : "0" (service),
25434 "1" (0),
25435 - "D" (&bios32_indirect));
25436 + "D" (&bios32_indirect),
25437 + "r"(__PCIBIOS_DS)
25438 + : "memory");
25439 +
25440 + pax_open_kernel();
25441 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
25442 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
25443 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
25444 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
25445 + pax_close_kernel();
25446 +
25447 local_irq_restore(flags);
25448
25449 switch (return_code) {
25450 - case 0:
25451 - return address + entry;
25452 - case 0x80: /* Not present */
25453 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25454 - return 0;
25455 - default: /* Shouldn't happen */
25456 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25457 - service, return_code);
25458 + case 0: {
25459 + int cpu;
25460 + unsigned char flags;
25461 +
25462 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
25463 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
25464 + printk(KERN_WARNING "bios32_service: not valid\n");
25465 return 0;
25466 + }
25467 + address = address + PAGE_OFFSET;
25468 + length += 16UL; /* some BIOSs underreport this... */
25469 + flags = 4;
25470 + if (length >= 64*1024*1024) {
25471 + length >>= PAGE_SHIFT;
25472 + flags |= 8;
25473 + }
25474 +
25475 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25476 + gdt = get_cpu_gdt_table(cpu);
25477 + pack_descriptor(&d, address, length, 0x9b, flags);
25478 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25479 + pack_descriptor(&d, address, length, 0x93, flags);
25480 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25481 + }
25482 + return entry;
25483 + }
25484 + case 0x80: /* Not present */
25485 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25486 + return 0;
25487 + default: /* Shouldn't happen */
25488 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25489 + service, return_code);
25490 + return 0;
25491 }
25492 }
25493
25494 static struct {
25495 unsigned long address;
25496 unsigned short segment;
25497 -} pci_indirect = { 0, __KERNEL_CS };
25498 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
25499
25500 -static int pci_bios_present;
25501 +static int pci_bios_present __read_only;
25502
25503 static int __devinit check_pcibios(void)
25504 {
25505 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
25506 unsigned long flags, pcibios_entry;
25507
25508 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
25509 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
25510 + pci_indirect.address = pcibios_entry;
25511
25512 local_irq_save(flags);
25513 - __asm__(
25514 - "lcall *(%%edi); cld\n\t"
25515 + __asm__("movw %w6, %%ds\n\t"
25516 + "lcall *%%ss:(%%edi); cld\n\t"
25517 + "push %%ss\n\t"
25518 + "pop %%ds\n\t"
25519 "jc 1f\n\t"
25520 "xor %%ah, %%ah\n"
25521 "1:"
25522 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
25523 "=b" (ebx),
25524 "=c" (ecx)
25525 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
25526 - "D" (&pci_indirect)
25527 + "D" (&pci_indirect),
25528 + "r" (__PCIBIOS_DS)
25529 : "memory");
25530 local_irq_restore(flags);
25531
25532 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25533
25534 switch (len) {
25535 case 1:
25536 - __asm__("lcall *(%%esi); cld\n\t"
25537 + __asm__("movw %w6, %%ds\n\t"
25538 + "lcall *%%ss:(%%esi); cld\n\t"
25539 + "push %%ss\n\t"
25540 + "pop %%ds\n\t"
25541 "jc 1f\n\t"
25542 "xor %%ah, %%ah\n"
25543 "1:"
25544 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25545 : "1" (PCIBIOS_READ_CONFIG_BYTE),
25546 "b" (bx),
25547 "D" ((long)reg),
25548 - "S" (&pci_indirect));
25549 + "S" (&pci_indirect),
25550 + "r" (__PCIBIOS_DS));
25551 /*
25552 * Zero-extend the result beyond 8 bits, do not trust the
25553 * BIOS having done it:
25554 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25555 *value &= 0xff;
25556 break;
25557 case 2:
25558 - __asm__("lcall *(%%esi); cld\n\t"
25559 + __asm__("movw %w6, %%ds\n\t"
25560 + "lcall *%%ss:(%%esi); cld\n\t"
25561 + "push %%ss\n\t"
25562 + "pop %%ds\n\t"
25563 "jc 1f\n\t"
25564 "xor %%ah, %%ah\n"
25565 "1:"
25566 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25567 : "1" (PCIBIOS_READ_CONFIG_WORD),
25568 "b" (bx),
25569 "D" ((long)reg),
25570 - "S" (&pci_indirect));
25571 + "S" (&pci_indirect),
25572 + "r" (__PCIBIOS_DS));
25573 /*
25574 * Zero-extend the result beyond 16 bits, do not trust the
25575 * BIOS having done it:
25576 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25577 *value &= 0xffff;
25578 break;
25579 case 4:
25580 - __asm__("lcall *(%%esi); cld\n\t"
25581 + __asm__("movw %w6, %%ds\n\t"
25582 + "lcall *%%ss:(%%esi); cld\n\t"
25583 + "push %%ss\n\t"
25584 + "pop %%ds\n\t"
25585 "jc 1f\n\t"
25586 "xor %%ah, %%ah\n"
25587 "1:"
25588 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25589 : "1" (PCIBIOS_READ_CONFIG_DWORD),
25590 "b" (bx),
25591 "D" ((long)reg),
25592 - "S" (&pci_indirect));
25593 + "S" (&pci_indirect),
25594 + "r" (__PCIBIOS_DS));
25595 break;
25596 }
25597
25598 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25599
25600 switch (len) {
25601 case 1:
25602 - __asm__("lcall *(%%esi); cld\n\t"
25603 + __asm__("movw %w6, %%ds\n\t"
25604 + "lcall *%%ss:(%%esi); cld\n\t"
25605 + "push %%ss\n\t"
25606 + "pop %%ds\n\t"
25607 "jc 1f\n\t"
25608 "xor %%ah, %%ah\n"
25609 "1:"
25610 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25611 "c" (value),
25612 "b" (bx),
25613 "D" ((long)reg),
25614 - "S" (&pci_indirect));
25615 + "S" (&pci_indirect),
25616 + "r" (__PCIBIOS_DS));
25617 break;
25618 case 2:
25619 - __asm__("lcall *(%%esi); cld\n\t"
25620 + __asm__("movw %w6, %%ds\n\t"
25621 + "lcall *%%ss:(%%esi); cld\n\t"
25622 + "push %%ss\n\t"
25623 + "pop %%ds\n\t"
25624 "jc 1f\n\t"
25625 "xor %%ah, %%ah\n"
25626 "1:"
25627 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25628 "c" (value),
25629 "b" (bx),
25630 "D" ((long)reg),
25631 - "S" (&pci_indirect));
25632 + "S" (&pci_indirect),
25633 + "r" (__PCIBIOS_DS));
25634 break;
25635 case 4:
25636 - __asm__("lcall *(%%esi); cld\n\t"
25637 + __asm__("movw %w6, %%ds\n\t"
25638 + "lcall *%%ss:(%%esi); cld\n\t"
25639 + "push %%ss\n\t"
25640 + "pop %%ds\n\t"
25641 "jc 1f\n\t"
25642 "xor %%ah, %%ah\n"
25643 "1:"
25644 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
25645 "c" (value),
25646 "b" (bx),
25647 "D" ((long)reg),
25648 - "S" (&pci_indirect));
25649 + "S" (&pci_indirect),
25650 + "r" (__PCIBIOS_DS));
25651 break;
25652 }
25653
25654 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25655
25656 DBG("PCI: Fetching IRQ routing table... ");
25657 __asm__("push %%es\n\t"
25658 + "movw %w8, %%ds\n\t"
25659 "push %%ds\n\t"
25660 "pop %%es\n\t"
25661 - "lcall *(%%esi); cld\n\t"
25662 + "lcall *%%ss:(%%esi); cld\n\t"
25663 "pop %%es\n\t"
25664 + "push %%ss\n\t"
25665 + "pop %%ds\n"
25666 "jc 1f\n\t"
25667 "xor %%ah, %%ah\n"
25668 "1:"
25669 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
25670 "1" (0),
25671 "D" ((long) &opt),
25672 "S" (&pci_indirect),
25673 - "m" (opt)
25674 + "m" (opt),
25675 + "r" (__PCIBIOS_DS)
25676 : "memory");
25677 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
25678 if (ret & 0xff00)
25679 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25680 {
25681 int ret;
25682
25683 - __asm__("lcall *(%%esi); cld\n\t"
25684 + __asm__("movw %w5, %%ds\n\t"
25685 + "lcall *%%ss:(%%esi); cld\n\t"
25686 + "push %%ss\n\t"
25687 + "pop %%ds\n"
25688 "jc 1f\n\t"
25689 "xor %%ah, %%ah\n"
25690 "1:"
25691 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
25692 : "0" (PCIBIOS_SET_PCI_HW_INT),
25693 "b" ((dev->bus->number << 8) | dev->devfn),
25694 "c" ((irq << 8) | (pin + 10)),
25695 - "S" (&pci_indirect));
25696 + "S" (&pci_indirect),
25697 + "r" (__PCIBIOS_DS));
25698 return !(ret & 0xff00);
25699 }
25700 EXPORT_SYMBOL(pcibios_set_irq_routing);
25701 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
25702 index 40e4469..1ab536e 100644
25703 --- a/arch/x86/platform/efi/efi_32.c
25704 +++ b/arch/x86/platform/efi/efi_32.c
25705 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
25706 {
25707 struct desc_ptr gdt_descr;
25708
25709 +#ifdef CONFIG_PAX_KERNEXEC
25710 + struct desc_struct d;
25711 +#endif
25712 +
25713 local_irq_save(efi_rt_eflags);
25714
25715 load_cr3(initial_page_table);
25716 __flush_tlb_all();
25717
25718 +#ifdef CONFIG_PAX_KERNEXEC
25719 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
25720 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25721 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
25722 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25723 +#endif
25724 +
25725 gdt_descr.address = __pa(get_cpu_gdt_table(0));
25726 gdt_descr.size = GDT_SIZE - 1;
25727 load_gdt(&gdt_descr);
25728 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
25729 {
25730 struct desc_ptr gdt_descr;
25731
25732 +#ifdef CONFIG_PAX_KERNEXEC
25733 + struct desc_struct d;
25734 +
25735 + memset(&d, 0, sizeof d);
25736 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
25737 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
25738 +#endif
25739 +
25740 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
25741 gdt_descr.size = GDT_SIZE - 1;
25742 load_gdt(&gdt_descr);
25743 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
25744 index fbe66e6..c5c0dd2 100644
25745 --- a/arch/x86/platform/efi/efi_stub_32.S
25746 +++ b/arch/x86/platform/efi/efi_stub_32.S
25747 @@ -6,7 +6,9 @@
25748 */
25749
25750 #include <linux/linkage.h>
25751 +#include <linux/init.h>
25752 #include <asm/page_types.h>
25753 +#include <asm/segment.h>
25754
25755 /*
25756 * efi_call_phys(void *, ...) is a function with variable parameters.
25757 @@ -20,7 +22,7 @@
25758 * service functions will comply with gcc calling convention, too.
25759 */
25760
25761 -.text
25762 +__INIT
25763 ENTRY(efi_call_phys)
25764 /*
25765 * 0. The function can only be called in Linux kernel. So CS has been
25766 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
25767 * The mapping of lower virtual memory has been created in prelog and
25768 * epilog.
25769 */
25770 - movl $1f, %edx
25771 - subl $__PAGE_OFFSET, %edx
25772 - jmp *%edx
25773 + movl $(__KERNEXEC_EFI_DS), %edx
25774 + mov %edx, %ds
25775 + mov %edx, %es
25776 + mov %edx, %ss
25777 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
25778 1:
25779
25780 /*
25781 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
25782 * parameter 2, ..., param n. To make things easy, we save the return
25783 * address of efi_call_phys in a global variable.
25784 */
25785 - popl %edx
25786 - movl %edx, saved_return_addr
25787 - /* get the function pointer into ECX*/
25788 - popl %ecx
25789 - movl %ecx, efi_rt_function_ptr
25790 - movl $2f, %edx
25791 - subl $__PAGE_OFFSET, %edx
25792 - pushl %edx
25793 + popl (saved_return_addr)
25794 + popl (efi_rt_function_ptr)
25795
25796 /*
25797 * 3. Clear PG bit in %CR0.
25798 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
25799 /*
25800 * 5. Call the physical function.
25801 */
25802 - jmp *%ecx
25803 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
25804
25805 -2:
25806 /*
25807 * 6. After EFI runtime service returns, control will return to
25808 * following instruction. We'd better readjust stack pointer first.
25809 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
25810 movl %cr0, %edx
25811 orl $0x80000000, %edx
25812 movl %edx, %cr0
25813 - jmp 1f
25814 -1:
25815 +
25816 /*
25817 * 8. Now restore the virtual mode from flat mode by
25818 * adding EIP with PAGE_OFFSET.
25819 */
25820 - movl $1f, %edx
25821 - jmp *%edx
25822 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
25823 1:
25824 + movl $(__KERNEL_DS), %edx
25825 + mov %edx, %ds
25826 + mov %edx, %es
25827 + mov %edx, %ss
25828
25829 /*
25830 * 9. Balance the stack. And because EAX contain the return value,
25831 * we'd better not clobber it.
25832 */
25833 - leal efi_rt_function_ptr, %edx
25834 - movl (%edx), %ecx
25835 - pushl %ecx
25836 + pushl (efi_rt_function_ptr)
25837
25838 /*
25839 - * 10. Push the saved return address onto the stack and return.
25840 + * 10. Return to the saved return address.
25841 */
25842 - leal saved_return_addr, %edx
25843 - movl (%edx), %ecx
25844 - pushl %ecx
25845 - ret
25846 + jmpl *(saved_return_addr)
25847 ENDPROC(efi_call_phys)
25848 .previous
25849
25850 -.data
25851 +__INITDATA
25852 saved_return_addr:
25853 .long 0
25854 efi_rt_function_ptr:
25855 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
25856 index 4c07cca..2c8427d 100644
25857 --- a/arch/x86/platform/efi/efi_stub_64.S
25858 +++ b/arch/x86/platform/efi/efi_stub_64.S
25859 @@ -7,6 +7,7 @@
25860 */
25861
25862 #include <linux/linkage.h>
25863 +#include <asm/alternative-asm.h>
25864
25865 #define SAVE_XMM \
25866 mov %rsp, %rax; \
25867 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
25868 call *%rdi
25869 addq $32, %rsp
25870 RESTORE_XMM
25871 + pax_force_retaddr 0, 1
25872 ret
25873 ENDPROC(efi_call0)
25874
25875 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
25876 call *%rdi
25877 addq $32, %rsp
25878 RESTORE_XMM
25879 + pax_force_retaddr 0, 1
25880 ret
25881 ENDPROC(efi_call1)
25882
25883 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
25884 call *%rdi
25885 addq $32, %rsp
25886 RESTORE_XMM
25887 + pax_force_retaddr 0, 1
25888 ret
25889 ENDPROC(efi_call2)
25890
25891 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
25892 call *%rdi
25893 addq $32, %rsp
25894 RESTORE_XMM
25895 + pax_force_retaddr 0, 1
25896 ret
25897 ENDPROC(efi_call3)
25898
25899 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
25900 call *%rdi
25901 addq $32, %rsp
25902 RESTORE_XMM
25903 + pax_force_retaddr 0, 1
25904 ret
25905 ENDPROC(efi_call4)
25906
25907 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
25908 call *%rdi
25909 addq $48, %rsp
25910 RESTORE_XMM
25911 + pax_force_retaddr 0, 1
25912 ret
25913 ENDPROC(efi_call5)
25914
25915 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
25916 call *%rdi
25917 addq $48, %rsp
25918 RESTORE_XMM
25919 + pax_force_retaddr 0, 1
25920 ret
25921 ENDPROC(efi_call6)
25922 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
25923 index ad4ec1c..686479e 100644
25924 --- a/arch/x86/platform/mrst/mrst.c
25925 +++ b/arch/x86/platform/mrst/mrst.c
25926 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
25927 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
25928 int sfi_mrtc_num;
25929
25930 -static void mrst_power_off(void)
25931 +static __noreturn void mrst_power_off(void)
25932 {
25933 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25934 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
25935 + BUG();
25936 }
25937
25938 -static void mrst_reboot(void)
25939 +static __noreturn void mrst_reboot(void)
25940 {
25941 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
25942 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
25943 else
25944 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
25945 + BUG();
25946 }
25947
25948 /* parse all the mtimer info to a static mtimer array */
25949 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
25950 index f10c0af..3ec1f95 100644
25951 --- a/arch/x86/power/cpu.c
25952 +++ b/arch/x86/power/cpu.c
25953 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
25954 static void fix_processor_context(void)
25955 {
25956 int cpu = smp_processor_id();
25957 - struct tss_struct *t = &per_cpu(init_tss, cpu);
25958 + struct tss_struct *t = init_tss + cpu;
25959
25960 set_tss_desc(cpu, t); /*
25961 * This just modifies memory; should not be
25962 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
25963 */
25964
25965 #ifdef CONFIG_X86_64
25966 + pax_open_kernel();
25967 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
25968 + pax_close_kernel();
25969
25970 syscall_init(); /* This sets MSR_*STAR and related */
25971 #endif
25972 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
25973 index 5d17950..2253fc9 100644
25974 --- a/arch/x86/vdso/Makefile
25975 +++ b/arch/x86/vdso/Makefile
25976 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
25977 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
25978 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
25979
25980 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25981 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
25982 GCOV_PROFILE := n
25983
25984 #
25985 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
25986 index 468d591..8e80a0a 100644
25987 --- a/arch/x86/vdso/vdso32-setup.c
25988 +++ b/arch/x86/vdso/vdso32-setup.c
25989 @@ -25,6 +25,7 @@
25990 #include <asm/tlbflush.h>
25991 #include <asm/vdso.h>
25992 #include <asm/proto.h>
25993 +#include <asm/mman.h>
25994
25995 enum {
25996 VDSO_DISABLED = 0,
25997 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
25998 void enable_sep_cpu(void)
25999 {
26000 int cpu = get_cpu();
26001 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26002 + struct tss_struct *tss = init_tss + cpu;
26003
26004 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26005 put_cpu();
26006 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26007 gate_vma.vm_start = FIXADDR_USER_START;
26008 gate_vma.vm_end = FIXADDR_USER_END;
26009 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26010 - gate_vma.vm_page_prot = __P101;
26011 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26012 /*
26013 * Make sure the vDSO gets into every core dump.
26014 * Dumping its contents makes post-mortem fully interpretable later
26015 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26016 if (compat)
26017 addr = VDSO_HIGH_BASE;
26018 else {
26019 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26020 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26021 if (IS_ERR_VALUE(addr)) {
26022 ret = addr;
26023 goto up_fail;
26024 }
26025 }
26026
26027 - current->mm->context.vdso = (void *)addr;
26028 + current->mm->context.vdso = addr;
26029
26030 if (compat_uses_vma || !compat) {
26031 /*
26032 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26033 }
26034
26035 current_thread_info()->sysenter_return =
26036 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26037 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26038
26039 up_fail:
26040 if (ret)
26041 - current->mm->context.vdso = NULL;
26042 + current->mm->context.vdso = 0;
26043
26044 up_write(&mm->mmap_sem);
26045
26046 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26047
26048 const char *arch_vma_name(struct vm_area_struct *vma)
26049 {
26050 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26051 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26052 return "[vdso]";
26053 +
26054 +#ifdef CONFIG_PAX_SEGMEXEC
26055 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26056 + return "[vdso]";
26057 +#endif
26058 +
26059 return NULL;
26060 }
26061
26062 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26063 * Check to see if the corresponding task was created in compat vdso
26064 * mode.
26065 */
26066 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26067 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26068 return &gate_vma;
26069 return NULL;
26070 }
26071 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26072 index 153407c..611cba9 100644
26073 --- a/arch/x86/vdso/vma.c
26074 +++ b/arch/x86/vdso/vma.c
26075 @@ -16,8 +16,6 @@
26076 #include <asm/vdso.h>
26077 #include <asm/page.h>
26078
26079 -unsigned int __read_mostly vdso_enabled = 1;
26080 -
26081 extern char vdso_start[], vdso_end[];
26082 extern unsigned short vdso_sync_cpuid;
26083
26084 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26085 * unaligned here as a result of stack start randomization.
26086 */
26087 addr = PAGE_ALIGN(addr);
26088 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26089
26090 return addr;
26091 }
26092 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26093 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26094 {
26095 struct mm_struct *mm = current->mm;
26096 - unsigned long addr;
26097 + unsigned long addr = 0;
26098 int ret;
26099
26100 - if (!vdso_enabled)
26101 - return 0;
26102 -
26103 down_write(&mm->mmap_sem);
26104 +
26105 +#ifdef CONFIG_PAX_RANDMMAP
26106 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26107 +#endif
26108 +
26109 addr = vdso_addr(mm->start_stack, vdso_size);
26110 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26111 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26112 if (IS_ERR_VALUE(addr)) {
26113 ret = addr;
26114 goto up_fail;
26115 }
26116
26117 - current->mm->context.vdso = (void *)addr;
26118 + mm->context.vdso = addr;
26119
26120 ret = install_special_mapping(mm, addr, vdso_size,
26121 VM_READ|VM_EXEC|
26122 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
26123 VM_ALWAYSDUMP,
26124 vdso_pages);
26125 - if (ret) {
26126 - current->mm->context.vdso = NULL;
26127 - goto up_fail;
26128 - }
26129 +
26130 + if (ret)
26131 + mm->context.vdso = 0;
26132
26133 up_fail:
26134 up_write(&mm->mmap_sem);
26135 return ret;
26136 }
26137 -
26138 -static __init int vdso_setup(char *s)
26139 -{
26140 - vdso_enabled = simple_strtoul(s, NULL, 0);
26141 - return 0;
26142 -}
26143 -__setup("vdso=", vdso_setup);
26144 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26145 index 1f92865..c843b20 100644
26146 --- a/arch/x86/xen/enlighten.c
26147 +++ b/arch/x86/xen/enlighten.c
26148 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26149
26150 struct shared_info xen_dummy_shared_info;
26151
26152 -void *xen_initial_gdt;
26153 -
26154 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26155 __read_mostly int xen_have_vector_callback;
26156 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26157 @@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26158 #endif
26159 };
26160
26161 -static void xen_reboot(int reason)
26162 +static __noreturn void xen_reboot(int reason)
26163 {
26164 struct sched_shutdown r = { .reason = reason };
26165
26166 @@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
26167 BUG();
26168 }
26169
26170 -static void xen_restart(char *msg)
26171 +static __noreturn void xen_restart(char *msg)
26172 {
26173 xen_reboot(SHUTDOWN_reboot);
26174 }
26175
26176 -static void xen_emergency_restart(void)
26177 +static __noreturn void xen_emergency_restart(void)
26178 {
26179 xen_reboot(SHUTDOWN_reboot);
26180 }
26181
26182 -static void xen_machine_halt(void)
26183 +static __noreturn void xen_machine_halt(void)
26184 {
26185 xen_reboot(SHUTDOWN_poweroff);
26186 }
26187 @@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
26188 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26189
26190 /* Work out if we support NX */
26191 - x86_configure_nx();
26192 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26193 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26194 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26195 + unsigned l, h;
26196 +
26197 + __supported_pte_mask |= _PAGE_NX;
26198 + rdmsr(MSR_EFER, l, h);
26199 + l |= EFER_NX;
26200 + wrmsr(MSR_EFER, l, h);
26201 + }
26202 +#endif
26203
26204 xen_setup_features();
26205
26206 @@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
26207
26208 machine_ops = xen_machine_ops;
26209
26210 - /*
26211 - * The only reliable way to retain the initial address of the
26212 - * percpu gdt_page is to remember it here, so we can go and
26213 - * mark it RW later, when the initial percpu area is freed.
26214 - */
26215 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26216 -
26217 xen_smp_init();
26218
26219 #ifdef CONFIG_ACPI_NUMA
26220 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26221 index 87f6673..e2555a6 100644
26222 --- a/arch/x86/xen/mmu.c
26223 +++ b/arch/x86/xen/mmu.c
26224 @@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26225 convert_pfn_mfn(init_level4_pgt);
26226 convert_pfn_mfn(level3_ident_pgt);
26227 convert_pfn_mfn(level3_kernel_pgt);
26228 + convert_pfn_mfn(level3_vmalloc_start_pgt);
26229 + convert_pfn_mfn(level3_vmalloc_end_pgt);
26230 + convert_pfn_mfn(level3_vmemmap_pgt);
26231
26232 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26233 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26234 @@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26235 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26236 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26237 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26238 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26239 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26240 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26241 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26242 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26243 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26244 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26245
26246 @@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
26247 pv_mmu_ops.set_pud = xen_set_pud;
26248 #if PAGETABLE_LEVELS == 4
26249 pv_mmu_ops.set_pgd = xen_set_pgd;
26250 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26251 #endif
26252
26253 /* This will work as long as patching hasn't happened yet
26254 @@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
26255 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26256 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26257 .set_pgd = xen_set_pgd_hyper,
26258 + .set_pgd_batched = xen_set_pgd_hyper,
26259
26260 .alloc_pud = xen_alloc_pmd_init,
26261 .release_pud = xen_release_pmd_init,
26262 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26263 index 041d4fe..7666b7e 100644
26264 --- a/arch/x86/xen/smp.c
26265 +++ b/arch/x86/xen/smp.c
26266 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26267 {
26268 BUG_ON(smp_processor_id() != 0);
26269 native_smp_prepare_boot_cpu();
26270 -
26271 - /* We've switched to the "real" per-cpu gdt, so make sure the
26272 - old memory can be recycled */
26273 - make_lowmem_page_readwrite(xen_initial_gdt);
26274 -
26275 xen_filter_cpu_maps();
26276 xen_setup_vcpu_info_placement();
26277 }
26278 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26279 gdt = get_cpu_gdt_table(cpu);
26280
26281 ctxt->flags = VGCF_IN_KERNEL;
26282 - ctxt->user_regs.ds = __USER_DS;
26283 - ctxt->user_regs.es = __USER_DS;
26284 + ctxt->user_regs.ds = __KERNEL_DS;
26285 + ctxt->user_regs.es = __KERNEL_DS;
26286 ctxt->user_regs.ss = __KERNEL_DS;
26287 #ifdef CONFIG_X86_32
26288 ctxt->user_regs.fs = __KERNEL_PERCPU;
26289 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26290 + savesegment(gs, ctxt->user_regs.gs);
26291 #else
26292 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26293 #endif
26294 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26295 int rc;
26296
26297 per_cpu(current_task, cpu) = idle;
26298 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
26299 #ifdef CONFIG_X86_32
26300 irq_ctx_init(cpu);
26301 #else
26302 clear_tsk_thread_flag(idle, TIF_FORK);
26303 - per_cpu(kernel_stack, cpu) =
26304 - (unsigned long)task_stack_page(idle) -
26305 - KERNEL_STACK_OFFSET + THREAD_SIZE;
26306 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26307 #endif
26308 xen_setup_runstate_info(cpu);
26309 xen_setup_timer(cpu);
26310 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26311 index b040b0e..8cc4fe0 100644
26312 --- a/arch/x86/xen/xen-asm_32.S
26313 +++ b/arch/x86/xen/xen-asm_32.S
26314 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
26315 ESP_OFFSET=4 # bytes pushed onto stack
26316
26317 /*
26318 - * Store vcpu_info pointer for easy access. Do it this way to
26319 - * avoid having to reload %fs
26320 + * Store vcpu_info pointer for easy access.
26321 */
26322 #ifdef CONFIG_SMP
26323 - GET_THREAD_INFO(%eax)
26324 - movl TI_cpu(%eax), %eax
26325 - movl __per_cpu_offset(,%eax,4), %eax
26326 - mov xen_vcpu(%eax), %eax
26327 + push %fs
26328 + mov $(__KERNEL_PERCPU), %eax
26329 + mov %eax, %fs
26330 + mov PER_CPU_VAR(xen_vcpu), %eax
26331 + pop %fs
26332 #else
26333 movl xen_vcpu, %eax
26334 #endif
26335 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26336 index aaa7291..3f77960 100644
26337 --- a/arch/x86/xen/xen-head.S
26338 +++ b/arch/x86/xen/xen-head.S
26339 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
26340 #ifdef CONFIG_X86_32
26341 mov %esi,xen_start_info
26342 mov $init_thread_union+THREAD_SIZE,%esp
26343 +#ifdef CONFIG_SMP
26344 + movl $cpu_gdt_table,%edi
26345 + movl $__per_cpu_load,%eax
26346 + movw %ax,__KERNEL_PERCPU + 2(%edi)
26347 + rorl $16,%eax
26348 + movb %al,__KERNEL_PERCPU + 4(%edi)
26349 + movb %ah,__KERNEL_PERCPU + 7(%edi)
26350 + movl $__per_cpu_end - 1,%eax
26351 + subl $__per_cpu_start,%eax
26352 + movw %ax,__KERNEL_PERCPU + 0(%edi)
26353 +#endif
26354 #else
26355 mov %rsi,xen_start_info
26356 mov $init_thread_union+THREAD_SIZE,%rsp
26357 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26358 index b095739..8c17bcd 100644
26359 --- a/arch/x86/xen/xen-ops.h
26360 +++ b/arch/x86/xen/xen-ops.h
26361 @@ -10,8 +10,6 @@
26362 extern const char xen_hypervisor_callback[];
26363 extern const char xen_failsafe_callback[];
26364
26365 -extern void *xen_initial_gdt;
26366 -
26367 struct trap_info;
26368 void xen_copy_trap_info(struct trap_info *traps);
26369
26370 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
26371 index 525bd3d..ef888b1 100644
26372 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
26373 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
26374 @@ -119,9 +119,9 @@
26375 ----------------------------------------------------------------------*/
26376
26377 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
26378 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
26379 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
26380 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
26381 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26382
26383 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
26384 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
26385 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
26386 index 2f33760..835e50a 100644
26387 --- a/arch/xtensa/variants/fsf/include/variant/core.h
26388 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
26389 @@ -11,6 +11,7 @@
26390 #ifndef _XTENSA_CORE_H
26391 #define _XTENSA_CORE_H
26392
26393 +#include <linux/const.h>
26394
26395 /****************************************************************************
26396 Parameters Useful for Any Code, USER or PRIVILEGED
26397 @@ -112,9 +113,9 @@
26398 ----------------------------------------------------------------------*/
26399
26400 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26401 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26402 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26403 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26404 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26405
26406 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
26407 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
26408 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
26409 index af00795..2bb8105 100644
26410 --- a/arch/xtensa/variants/s6000/include/variant/core.h
26411 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
26412 @@ -11,6 +11,7 @@
26413 #ifndef _XTENSA_CORE_CONFIGURATION_H
26414 #define _XTENSA_CORE_CONFIGURATION_H
26415
26416 +#include <linux/const.h>
26417
26418 /****************************************************************************
26419 Parameters Useful for Any Code, USER or PRIVILEGED
26420 @@ -118,9 +119,9 @@
26421 ----------------------------------------------------------------------*/
26422
26423 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26424 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26425 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26426 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26427 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26428
26429 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
26430 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
26431 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26432 index 58916af..9cb880b 100644
26433 --- a/block/blk-iopoll.c
26434 +++ b/block/blk-iopoll.c
26435 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26436 }
26437 EXPORT_SYMBOL(blk_iopoll_complete);
26438
26439 -static void blk_iopoll_softirq(struct softirq_action *h)
26440 +static void blk_iopoll_softirq(void)
26441 {
26442 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26443 int rearm = 0, budget = blk_iopoll_budget;
26444 diff --git a/block/blk-map.c b/block/blk-map.c
26445 index 623e1cd..ca1e109 100644
26446 --- a/block/blk-map.c
26447 +++ b/block/blk-map.c
26448 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26449 if (!len || !kbuf)
26450 return -EINVAL;
26451
26452 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
26453 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
26454 if (do_copy)
26455 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
26456 else
26457 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
26458 index 1366a89..e17f54b 100644
26459 --- a/block/blk-softirq.c
26460 +++ b/block/blk-softirq.c
26461 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
26462 * Softirq action handler - move entries to local list and loop over them
26463 * while passing them to the queue registered handler.
26464 */
26465 -static void blk_done_softirq(struct softirq_action *h)
26466 +static void blk_done_softirq(void)
26467 {
26468 struct list_head *cpu_list, local_list;
26469
26470 diff --git a/block/bsg.c b/block/bsg.c
26471 index c0ab25c..9d49f8f 100644
26472 --- a/block/bsg.c
26473 +++ b/block/bsg.c
26474 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
26475 struct sg_io_v4 *hdr, struct bsg_device *bd,
26476 fmode_t has_write_perm)
26477 {
26478 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26479 + unsigned char *cmdptr;
26480 +
26481 if (hdr->request_len > BLK_MAX_CDB) {
26482 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
26483 if (!rq->cmd)
26484 return -ENOMEM;
26485 - }
26486 + cmdptr = rq->cmd;
26487 + } else
26488 + cmdptr = tmpcmd;
26489
26490 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
26491 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
26492 hdr->request_len))
26493 return -EFAULT;
26494
26495 + if (cmdptr != rq->cmd)
26496 + memcpy(rq->cmd, cmdptr, hdr->request_len);
26497 +
26498 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
26499 if (blk_verify_command(rq->cmd, has_write_perm))
26500 return -EPERM;
26501 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
26502 index 7b72502..646105c 100644
26503 --- a/block/compat_ioctl.c
26504 +++ b/block/compat_ioctl.c
26505 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
26506 err |= __get_user(f->spec1, &uf->spec1);
26507 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
26508 err |= __get_user(name, &uf->name);
26509 - f->name = compat_ptr(name);
26510 + f->name = (void __force_kernel *)compat_ptr(name);
26511 if (err) {
26512 err = -EFAULT;
26513 goto out;
26514 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
26515 index 688be8a..8a37d98 100644
26516 --- a/block/scsi_ioctl.c
26517 +++ b/block/scsi_ioctl.c
26518 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
26519 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
26520 struct sg_io_hdr *hdr, fmode_t mode)
26521 {
26522 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
26523 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26524 + unsigned char *cmdptr;
26525 +
26526 + if (rq->cmd != rq->__cmd)
26527 + cmdptr = rq->cmd;
26528 + else
26529 + cmdptr = tmpcmd;
26530 +
26531 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
26532 return -EFAULT;
26533 +
26534 + if (cmdptr != rq->cmd)
26535 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
26536 +
26537 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
26538 return -EPERM;
26539
26540 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26541 int err;
26542 unsigned int in_len, out_len, bytes, opcode, cmdlen;
26543 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
26544 + unsigned char tmpcmd[sizeof(rq->__cmd)];
26545 + unsigned char *cmdptr;
26546
26547 if (!sic)
26548 return -EINVAL;
26549 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
26550 */
26551 err = -EFAULT;
26552 rq->cmd_len = cmdlen;
26553 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
26554 +
26555 + if (rq->cmd != rq->__cmd)
26556 + cmdptr = rq->cmd;
26557 + else
26558 + cmdptr = tmpcmd;
26559 +
26560 + if (copy_from_user(cmdptr, sic->data, cmdlen))
26561 goto error;
26562
26563 + if (rq->cmd != cmdptr)
26564 + memcpy(rq->cmd, cmdptr, cmdlen);
26565 +
26566 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
26567 goto error;
26568
26569 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
26570 index 671d4d6..5f24030 100644
26571 --- a/crypto/cryptd.c
26572 +++ b/crypto/cryptd.c
26573 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
26574
26575 struct cryptd_blkcipher_request_ctx {
26576 crypto_completion_t complete;
26577 -};
26578 +} __no_const;
26579
26580 struct cryptd_hash_ctx {
26581 struct crypto_shash *child;
26582 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
26583
26584 struct cryptd_aead_request_ctx {
26585 crypto_completion_t complete;
26586 -};
26587 +} __no_const;
26588
26589 static void cryptd_queue_worker(struct work_struct *work);
26590
26591 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
26592 index 5d41894..22021e4 100644
26593 --- a/drivers/acpi/apei/cper.c
26594 +++ b/drivers/acpi/apei/cper.c
26595 @@ -38,12 +38,12 @@
26596 */
26597 u64 cper_next_record_id(void)
26598 {
26599 - static atomic64_t seq;
26600 + static atomic64_unchecked_t seq;
26601
26602 - if (!atomic64_read(&seq))
26603 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
26604 + if (!atomic64_read_unchecked(&seq))
26605 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
26606
26607 - return atomic64_inc_return(&seq);
26608 + return atomic64_inc_return_unchecked(&seq);
26609 }
26610 EXPORT_SYMBOL_GPL(cper_next_record_id);
26611
26612 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
26613 index 6c47ae9..abfdd63 100644
26614 --- a/drivers/acpi/ec_sys.c
26615 +++ b/drivers/acpi/ec_sys.c
26616 @@ -12,6 +12,7 @@
26617 #include <linux/acpi.h>
26618 #include <linux/debugfs.h>
26619 #include <linux/module.h>
26620 +#include <linux/uaccess.h>
26621 #include "internal.h"
26622
26623 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
26624 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26625 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
26626 */
26627 unsigned int size = EC_SPACE_SIZE;
26628 - u8 *data = (u8 *) buf;
26629 + u8 data;
26630 loff_t init_off = *off;
26631 int err = 0;
26632
26633 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
26634 size = count;
26635
26636 while (size) {
26637 - err = ec_read(*off, &data[*off - init_off]);
26638 + err = ec_read(*off, &data);
26639 if (err)
26640 return err;
26641 + if (put_user(data, &buf[*off - init_off]))
26642 + return -EFAULT;
26643 *off += 1;
26644 size--;
26645 }
26646 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26647
26648 unsigned int size = count;
26649 loff_t init_off = *off;
26650 - u8 *data = (u8 *) buf;
26651 int err = 0;
26652
26653 if (*off >= EC_SPACE_SIZE)
26654 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
26655 }
26656
26657 while (size) {
26658 - u8 byte_write = data[*off - init_off];
26659 + u8 byte_write;
26660 + if (get_user(byte_write, &buf[*off - init_off]))
26661 + return -EFAULT;
26662 err = ec_write(*off, byte_write);
26663 if (err)
26664 return err;
26665 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
26666 index 251c7b62..000462d 100644
26667 --- a/drivers/acpi/proc.c
26668 +++ b/drivers/acpi/proc.c
26669 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
26670 size_t count, loff_t * ppos)
26671 {
26672 struct list_head *node, *next;
26673 - char strbuf[5];
26674 - char str[5] = "";
26675 - unsigned int len = count;
26676 + char strbuf[5] = {0};
26677
26678 - if (len > 4)
26679 - len = 4;
26680 - if (len < 0)
26681 + if (count > 4)
26682 + count = 4;
26683 + if (copy_from_user(strbuf, buffer, count))
26684 return -EFAULT;
26685 -
26686 - if (copy_from_user(strbuf, buffer, len))
26687 - return -EFAULT;
26688 - strbuf[len] = '\0';
26689 - sscanf(strbuf, "%s", str);
26690 + strbuf[count] = '\0';
26691
26692 mutex_lock(&acpi_device_lock);
26693 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
26694 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
26695 if (!dev->wakeup.flags.valid)
26696 continue;
26697
26698 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
26699 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
26700 if (device_can_wakeup(&dev->dev)) {
26701 bool enable = !device_may_wakeup(&dev->dev);
26702 device_set_wakeup_enable(&dev->dev, enable);
26703 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
26704 index 9d7bc9f..a6fc091 100644
26705 --- a/drivers/acpi/processor_driver.c
26706 +++ b/drivers/acpi/processor_driver.c
26707 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
26708 return 0;
26709 #endif
26710
26711 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
26712 + BUG_ON(pr->id >= nr_cpu_ids);
26713
26714 /*
26715 * Buggy BIOS check
26716 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
26717 index c04ad68..0b99473 100644
26718 --- a/drivers/ata/libata-core.c
26719 +++ b/drivers/ata/libata-core.c
26720 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
26721 struct ata_port *ap;
26722 unsigned int tag;
26723
26724 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26725 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26726 ap = qc->ap;
26727
26728 qc->flags = 0;
26729 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
26730 struct ata_port *ap;
26731 struct ata_link *link;
26732
26733 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26734 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
26735 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
26736 ap = qc->ap;
26737 link = qc->dev->link;
26738 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26739 return;
26740
26741 spin_lock(&lock);
26742 + pax_open_kernel();
26743
26744 for (cur = ops->inherits; cur; cur = cur->inherits) {
26745 void **inherit = (void **)cur;
26746 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
26747 if (IS_ERR(*pp))
26748 *pp = NULL;
26749
26750 - ops->inherits = NULL;
26751 + *(struct ata_port_operations **)&ops->inherits = NULL;
26752
26753 + pax_close_kernel();
26754 spin_unlock(&lock);
26755 }
26756
26757 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
26758 index e8574bb..f9f6a72 100644
26759 --- a/drivers/ata/pata_arasan_cf.c
26760 +++ b/drivers/ata/pata_arasan_cf.c
26761 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
26762 /* Handle platform specific quirks */
26763 if (pdata->quirk) {
26764 if (pdata->quirk & CF_BROKEN_PIO) {
26765 - ap->ops->set_piomode = NULL;
26766 + pax_open_kernel();
26767 + *(void **)&ap->ops->set_piomode = NULL;
26768 + pax_close_kernel();
26769 ap->pio_mask = 0;
26770 }
26771 if (pdata->quirk & CF_BROKEN_MWDMA)
26772 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
26773 index f9b983a..887b9d8 100644
26774 --- a/drivers/atm/adummy.c
26775 +++ b/drivers/atm/adummy.c
26776 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
26777 vcc->pop(vcc, skb);
26778 else
26779 dev_kfree_skb_any(skb);
26780 - atomic_inc(&vcc->stats->tx);
26781 + atomic_inc_unchecked(&vcc->stats->tx);
26782
26783 return 0;
26784 }
26785 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
26786 index f8f41e0..1f987dd 100644
26787 --- a/drivers/atm/ambassador.c
26788 +++ b/drivers/atm/ambassador.c
26789 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
26790 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26791
26792 // VC layer stats
26793 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26794 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26795
26796 // free the descriptor
26797 kfree (tx_descr);
26798 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26799 dump_skb ("<<<", vc, skb);
26800
26801 // VC layer stats
26802 - atomic_inc(&atm_vcc->stats->rx);
26803 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26804 __net_timestamp(skb);
26805 // end of our responsibility
26806 atm_vcc->push (atm_vcc, skb);
26807 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
26808 } else {
26809 PRINTK (KERN_INFO, "dropped over-size frame");
26810 // should we count this?
26811 - atomic_inc(&atm_vcc->stats->rx_drop);
26812 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26813 }
26814
26815 } else {
26816 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
26817 }
26818
26819 if (check_area (skb->data, skb->len)) {
26820 - atomic_inc(&atm_vcc->stats->tx_err);
26821 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26822 return -ENOMEM; // ?
26823 }
26824
26825 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
26826 index b22d71c..d6e1049 100644
26827 --- a/drivers/atm/atmtcp.c
26828 +++ b/drivers/atm/atmtcp.c
26829 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26830 if (vcc->pop) vcc->pop(vcc,skb);
26831 else dev_kfree_skb(skb);
26832 if (dev_data) return 0;
26833 - atomic_inc(&vcc->stats->tx_err);
26834 + atomic_inc_unchecked(&vcc->stats->tx_err);
26835 return -ENOLINK;
26836 }
26837 size = skb->len+sizeof(struct atmtcp_hdr);
26838 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26839 if (!new_skb) {
26840 if (vcc->pop) vcc->pop(vcc,skb);
26841 else dev_kfree_skb(skb);
26842 - atomic_inc(&vcc->stats->tx_err);
26843 + atomic_inc_unchecked(&vcc->stats->tx_err);
26844 return -ENOBUFS;
26845 }
26846 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26847 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
26848 if (vcc->pop) vcc->pop(vcc,skb);
26849 else dev_kfree_skb(skb);
26850 out_vcc->push(out_vcc,new_skb);
26851 - atomic_inc(&vcc->stats->tx);
26852 - atomic_inc(&out_vcc->stats->rx);
26853 + atomic_inc_unchecked(&vcc->stats->tx);
26854 + atomic_inc_unchecked(&out_vcc->stats->rx);
26855 return 0;
26856 }
26857
26858 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26859 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26860 read_unlock(&vcc_sklist_lock);
26861 if (!out_vcc) {
26862 - atomic_inc(&vcc->stats->tx_err);
26863 + atomic_inc_unchecked(&vcc->stats->tx_err);
26864 goto done;
26865 }
26866 skb_pull(skb,sizeof(struct atmtcp_hdr));
26867 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
26868 __net_timestamp(new_skb);
26869 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26870 out_vcc->push(out_vcc,new_skb);
26871 - atomic_inc(&vcc->stats->tx);
26872 - atomic_inc(&out_vcc->stats->rx);
26873 + atomic_inc_unchecked(&vcc->stats->tx);
26874 + atomic_inc_unchecked(&out_vcc->stats->rx);
26875 done:
26876 if (vcc->pop) vcc->pop(vcc,skb);
26877 else dev_kfree_skb(skb);
26878 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
26879 index 956e9ac..133516d 100644
26880 --- a/drivers/atm/eni.c
26881 +++ b/drivers/atm/eni.c
26882 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26883 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26884 vcc->dev->number);
26885 length = 0;
26886 - atomic_inc(&vcc->stats->rx_err);
26887 + atomic_inc_unchecked(&vcc->stats->rx_err);
26888 }
26889 else {
26890 length = ATM_CELL_SIZE-1; /* no HEC */
26891 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26892 size);
26893 }
26894 eff = length = 0;
26895 - atomic_inc(&vcc->stats->rx_err);
26896 + atomic_inc_unchecked(&vcc->stats->rx_err);
26897 }
26898 else {
26899 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26900 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26901 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26902 vcc->dev->number,vcc->vci,length,size << 2,descr);
26903 length = eff = 0;
26904 - atomic_inc(&vcc->stats->rx_err);
26905 + atomic_inc_unchecked(&vcc->stats->rx_err);
26906 }
26907 }
26908 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26909 @@ -771,7 +771,7 @@ rx_dequeued++;
26910 vcc->push(vcc,skb);
26911 pushed++;
26912 }
26913 - atomic_inc(&vcc->stats->rx);
26914 + atomic_inc_unchecked(&vcc->stats->rx);
26915 }
26916 wake_up(&eni_dev->rx_wait);
26917 }
26918 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
26919 PCI_DMA_TODEVICE);
26920 if (vcc->pop) vcc->pop(vcc,skb);
26921 else dev_kfree_skb_irq(skb);
26922 - atomic_inc(&vcc->stats->tx);
26923 + atomic_inc_unchecked(&vcc->stats->tx);
26924 wake_up(&eni_dev->tx_wait);
26925 dma_complete++;
26926 }
26927 @@ -1569,7 +1569,7 @@ tx_complete++;
26928 /*--------------------------------- entries ---------------------------------*/
26929
26930
26931 -static const char *media_name[] __devinitdata = {
26932 +static const char *media_name[] __devinitconst = {
26933 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
26934 "UTP", "05?", "06?", "07?", /* 4- 7 */
26935 "TAXI","09?", "10?", "11?", /* 8-11 */
26936 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
26937 index 5072f8a..fa52520d 100644
26938 --- a/drivers/atm/firestream.c
26939 +++ b/drivers/atm/firestream.c
26940 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
26941 }
26942 }
26943
26944 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26945 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26946
26947 fs_dprintk (FS_DEBUG_TXMEM, "i");
26948 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26949 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26950 #endif
26951 skb_put (skb, qe->p1 & 0xffff);
26952 ATM_SKB(skb)->vcc = atm_vcc;
26953 - atomic_inc(&atm_vcc->stats->rx);
26954 + atomic_inc_unchecked(&atm_vcc->stats->rx);
26955 __net_timestamp(skb);
26956 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26957 atm_vcc->push (atm_vcc, skb);
26958 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
26959 kfree (pe);
26960 }
26961 if (atm_vcc)
26962 - atomic_inc(&atm_vcc->stats->rx_drop);
26963 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26964 break;
26965 case 0x1f: /* Reassembly abort: no buffers. */
26966 /* Silently increment error counter. */
26967 if (atm_vcc)
26968 - atomic_inc(&atm_vcc->stats->rx_drop);
26969 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26970 break;
26971 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26972 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26973 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
26974 index 361f5ae..7fc552d 100644
26975 --- a/drivers/atm/fore200e.c
26976 +++ b/drivers/atm/fore200e.c
26977 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
26978 #endif
26979 /* check error condition */
26980 if (*entry->status & STATUS_ERROR)
26981 - atomic_inc(&vcc->stats->tx_err);
26982 + atomic_inc_unchecked(&vcc->stats->tx_err);
26983 else
26984 - atomic_inc(&vcc->stats->tx);
26985 + atomic_inc_unchecked(&vcc->stats->tx);
26986 }
26987 }
26988
26989 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
26990 if (skb == NULL) {
26991 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26992
26993 - atomic_inc(&vcc->stats->rx_drop);
26994 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26995 return -ENOMEM;
26996 }
26997
26998 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
26999
27000 dev_kfree_skb_any(skb);
27001
27002 - atomic_inc(&vcc->stats->rx_drop);
27003 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27004 return -ENOMEM;
27005 }
27006
27007 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27008
27009 vcc->push(vcc, skb);
27010 - atomic_inc(&vcc->stats->rx);
27011 + atomic_inc_unchecked(&vcc->stats->rx);
27012
27013 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27014
27015 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27016 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27017 fore200e->atm_dev->number,
27018 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27019 - atomic_inc(&vcc->stats->rx_err);
27020 + atomic_inc_unchecked(&vcc->stats->rx_err);
27021 }
27022 }
27023
27024 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27025 goto retry_here;
27026 }
27027
27028 - atomic_inc(&vcc->stats->tx_err);
27029 + atomic_inc_unchecked(&vcc->stats->tx_err);
27030
27031 fore200e->tx_sat++;
27032 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27033 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27034 index 9a51df4..f3bb5f8 100644
27035 --- a/drivers/atm/he.c
27036 +++ b/drivers/atm/he.c
27037 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27038
27039 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27040 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27041 - atomic_inc(&vcc->stats->rx_drop);
27042 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27043 goto return_host_buffers;
27044 }
27045
27046 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27047 RBRQ_LEN_ERR(he_dev->rbrq_head)
27048 ? "LEN_ERR" : "",
27049 vcc->vpi, vcc->vci);
27050 - atomic_inc(&vcc->stats->rx_err);
27051 + atomic_inc_unchecked(&vcc->stats->rx_err);
27052 goto return_host_buffers;
27053 }
27054
27055 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27056 vcc->push(vcc, skb);
27057 spin_lock(&he_dev->global_lock);
27058
27059 - atomic_inc(&vcc->stats->rx);
27060 + atomic_inc_unchecked(&vcc->stats->rx);
27061
27062 return_host_buffers:
27063 ++pdus_assembled;
27064 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27065 tpd->vcc->pop(tpd->vcc, tpd->skb);
27066 else
27067 dev_kfree_skb_any(tpd->skb);
27068 - atomic_inc(&tpd->vcc->stats->tx_err);
27069 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27070 }
27071 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27072 return;
27073 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27074 vcc->pop(vcc, skb);
27075 else
27076 dev_kfree_skb_any(skb);
27077 - atomic_inc(&vcc->stats->tx_err);
27078 + atomic_inc_unchecked(&vcc->stats->tx_err);
27079 return -EINVAL;
27080 }
27081
27082 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27083 vcc->pop(vcc, skb);
27084 else
27085 dev_kfree_skb_any(skb);
27086 - atomic_inc(&vcc->stats->tx_err);
27087 + atomic_inc_unchecked(&vcc->stats->tx_err);
27088 return -EINVAL;
27089 }
27090 #endif
27091 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27092 vcc->pop(vcc, skb);
27093 else
27094 dev_kfree_skb_any(skb);
27095 - atomic_inc(&vcc->stats->tx_err);
27096 + atomic_inc_unchecked(&vcc->stats->tx_err);
27097 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27098 return -ENOMEM;
27099 }
27100 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27101 vcc->pop(vcc, skb);
27102 else
27103 dev_kfree_skb_any(skb);
27104 - atomic_inc(&vcc->stats->tx_err);
27105 + atomic_inc_unchecked(&vcc->stats->tx_err);
27106 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27107 return -ENOMEM;
27108 }
27109 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27110 __enqueue_tpd(he_dev, tpd, cid);
27111 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27112
27113 - atomic_inc(&vcc->stats->tx);
27114 + atomic_inc_unchecked(&vcc->stats->tx);
27115
27116 return 0;
27117 }
27118 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
27119 index b812103..e391a49 100644
27120 --- a/drivers/atm/horizon.c
27121 +++ b/drivers/atm/horizon.c
27122 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
27123 {
27124 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27125 // VC layer stats
27126 - atomic_inc(&vcc->stats->rx);
27127 + atomic_inc_unchecked(&vcc->stats->rx);
27128 __net_timestamp(skb);
27129 // end of our responsibility
27130 vcc->push (vcc, skb);
27131 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
27132 dev->tx_iovec = NULL;
27133
27134 // VC layer stats
27135 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27136 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27137
27138 // free the skb
27139 hrz_kfree_skb (skb);
27140 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
27141 index 1c05212..c28e200 100644
27142 --- a/drivers/atm/idt77252.c
27143 +++ b/drivers/atm/idt77252.c
27144 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
27145 else
27146 dev_kfree_skb(skb);
27147
27148 - atomic_inc(&vcc->stats->tx);
27149 + atomic_inc_unchecked(&vcc->stats->tx);
27150 }
27151
27152 atomic_dec(&scq->used);
27153 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27154 if ((sb = dev_alloc_skb(64)) == NULL) {
27155 printk("%s: Can't allocate buffers for aal0.\n",
27156 card->name);
27157 - atomic_add(i, &vcc->stats->rx_drop);
27158 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27159 break;
27160 }
27161 if (!atm_charge(vcc, sb->truesize)) {
27162 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
27163 card->name);
27164 - atomic_add(i - 1, &vcc->stats->rx_drop);
27165 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
27166 dev_kfree_skb(sb);
27167 break;
27168 }
27169 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27170 ATM_SKB(sb)->vcc = vcc;
27171 __net_timestamp(sb);
27172 vcc->push(vcc, sb);
27173 - atomic_inc(&vcc->stats->rx);
27174 + atomic_inc_unchecked(&vcc->stats->rx);
27175
27176 cell += ATM_CELL_PAYLOAD;
27177 }
27178 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27179 "(CDC: %08x)\n",
27180 card->name, len, rpp->len, readl(SAR_REG_CDC));
27181 recycle_rx_pool_skb(card, rpp);
27182 - atomic_inc(&vcc->stats->rx_err);
27183 + atomic_inc_unchecked(&vcc->stats->rx_err);
27184 return;
27185 }
27186 if (stat & SAR_RSQE_CRC) {
27187 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
27188 recycle_rx_pool_skb(card, rpp);
27189 - atomic_inc(&vcc->stats->rx_err);
27190 + atomic_inc_unchecked(&vcc->stats->rx_err);
27191 return;
27192 }
27193 if (skb_queue_len(&rpp->queue) > 1) {
27194 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27195 RXPRINTK("%s: Can't alloc RX skb.\n",
27196 card->name);
27197 recycle_rx_pool_skb(card, rpp);
27198 - atomic_inc(&vcc->stats->rx_err);
27199 + atomic_inc_unchecked(&vcc->stats->rx_err);
27200 return;
27201 }
27202 if (!atm_charge(vcc, skb->truesize)) {
27203 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27204 __net_timestamp(skb);
27205
27206 vcc->push(vcc, skb);
27207 - atomic_inc(&vcc->stats->rx);
27208 + atomic_inc_unchecked(&vcc->stats->rx);
27209
27210 return;
27211 }
27212 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27213 __net_timestamp(skb);
27214
27215 vcc->push(vcc, skb);
27216 - atomic_inc(&vcc->stats->rx);
27217 + atomic_inc_unchecked(&vcc->stats->rx);
27218
27219 if (skb->truesize > SAR_FB_SIZE_3)
27220 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
27221 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
27222 if (vcc->qos.aal != ATM_AAL0) {
27223 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
27224 card->name, vpi, vci);
27225 - atomic_inc(&vcc->stats->rx_drop);
27226 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27227 goto drop;
27228 }
27229
27230 if ((sb = dev_alloc_skb(64)) == NULL) {
27231 printk("%s: Can't allocate buffers for AAL0.\n",
27232 card->name);
27233 - atomic_inc(&vcc->stats->rx_err);
27234 + atomic_inc_unchecked(&vcc->stats->rx_err);
27235 goto drop;
27236 }
27237
27238 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
27239 ATM_SKB(sb)->vcc = vcc;
27240 __net_timestamp(sb);
27241 vcc->push(vcc, sb);
27242 - atomic_inc(&vcc->stats->rx);
27243 + atomic_inc_unchecked(&vcc->stats->rx);
27244
27245 drop:
27246 skb_pull(queue, 64);
27247 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27248
27249 if (vc == NULL) {
27250 printk("%s: NULL connection in send().\n", card->name);
27251 - atomic_inc(&vcc->stats->tx_err);
27252 + atomic_inc_unchecked(&vcc->stats->tx_err);
27253 dev_kfree_skb(skb);
27254 return -EINVAL;
27255 }
27256 if (!test_bit(VCF_TX, &vc->flags)) {
27257 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
27258 - atomic_inc(&vcc->stats->tx_err);
27259 + atomic_inc_unchecked(&vcc->stats->tx_err);
27260 dev_kfree_skb(skb);
27261 return -EINVAL;
27262 }
27263 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27264 break;
27265 default:
27266 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
27267 - atomic_inc(&vcc->stats->tx_err);
27268 + atomic_inc_unchecked(&vcc->stats->tx_err);
27269 dev_kfree_skb(skb);
27270 return -EINVAL;
27271 }
27272
27273 if (skb_shinfo(skb)->nr_frags != 0) {
27274 printk("%s: No scatter-gather yet.\n", card->name);
27275 - atomic_inc(&vcc->stats->tx_err);
27276 + atomic_inc_unchecked(&vcc->stats->tx_err);
27277 dev_kfree_skb(skb);
27278 return -EINVAL;
27279 }
27280 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27281
27282 err = queue_skb(card, vc, skb, oam);
27283 if (err) {
27284 - atomic_inc(&vcc->stats->tx_err);
27285 + atomic_inc_unchecked(&vcc->stats->tx_err);
27286 dev_kfree_skb(skb);
27287 return err;
27288 }
27289 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
27290 skb = dev_alloc_skb(64);
27291 if (!skb) {
27292 printk("%s: Out of memory in send_oam().\n", card->name);
27293 - atomic_inc(&vcc->stats->tx_err);
27294 + atomic_inc_unchecked(&vcc->stats->tx_err);
27295 return -ENOMEM;
27296 }
27297 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
27298 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
27299 index 3d0c2b0..45441fa 100644
27300 --- a/drivers/atm/iphase.c
27301 +++ b/drivers/atm/iphase.c
27302 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
27303 status = (u_short) (buf_desc_ptr->desc_mode);
27304 if (status & (RX_CER | RX_PTE | RX_OFL))
27305 {
27306 - atomic_inc(&vcc->stats->rx_err);
27307 + atomic_inc_unchecked(&vcc->stats->rx_err);
27308 IF_ERR(printk("IA: bad packet, dropping it");)
27309 if (status & RX_CER) {
27310 IF_ERR(printk(" cause: packet CRC error\n");)
27311 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
27312 len = dma_addr - buf_addr;
27313 if (len > iadev->rx_buf_sz) {
27314 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
27315 - atomic_inc(&vcc->stats->rx_err);
27316 + atomic_inc_unchecked(&vcc->stats->rx_err);
27317 goto out_free_desc;
27318 }
27319
27320 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27321 ia_vcc = INPH_IA_VCC(vcc);
27322 if (ia_vcc == NULL)
27323 {
27324 - atomic_inc(&vcc->stats->rx_err);
27325 + atomic_inc_unchecked(&vcc->stats->rx_err);
27326 dev_kfree_skb_any(skb);
27327 atm_return(vcc, atm_guess_pdu2truesize(len));
27328 goto INCR_DLE;
27329 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27330 if ((length > iadev->rx_buf_sz) || (length >
27331 (skb->len - sizeof(struct cpcs_trailer))))
27332 {
27333 - atomic_inc(&vcc->stats->rx_err);
27334 + atomic_inc_unchecked(&vcc->stats->rx_err);
27335 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
27336 length, skb->len);)
27337 dev_kfree_skb_any(skb);
27338 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27339
27340 IF_RX(printk("rx_dle_intr: skb push");)
27341 vcc->push(vcc,skb);
27342 - atomic_inc(&vcc->stats->rx);
27343 + atomic_inc_unchecked(&vcc->stats->rx);
27344 iadev->rx_pkt_cnt++;
27345 }
27346 INCR_DLE:
27347 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
27348 {
27349 struct k_sonet_stats *stats;
27350 stats = &PRIV(_ia_dev[board])->sonet_stats;
27351 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
27352 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
27353 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
27354 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
27355 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
27356 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
27357 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
27358 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
27359 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
27360 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
27361 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
27362 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
27363 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
27364 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
27365 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
27366 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
27367 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
27368 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
27369 }
27370 ia_cmds.status = 0;
27371 break;
27372 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27373 if ((desc == 0) || (desc > iadev->num_tx_desc))
27374 {
27375 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
27376 - atomic_inc(&vcc->stats->tx);
27377 + atomic_inc_unchecked(&vcc->stats->tx);
27378 if (vcc->pop)
27379 vcc->pop(vcc, skb);
27380 else
27381 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27382 ATM_DESC(skb) = vcc->vci;
27383 skb_queue_tail(&iadev->tx_dma_q, skb);
27384
27385 - atomic_inc(&vcc->stats->tx);
27386 + atomic_inc_unchecked(&vcc->stats->tx);
27387 iadev->tx_pkt_cnt++;
27388 /* Increment transaction counter */
27389 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
27390
27391 #if 0
27392 /* add flow control logic */
27393 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
27394 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
27395 if (iavcc->vc_desc_cnt > 10) {
27396 vcc->tx_quota = vcc->tx_quota * 3 / 4;
27397 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
27398 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
27399 index f556969..0da15eb 100644
27400 --- a/drivers/atm/lanai.c
27401 +++ b/drivers/atm/lanai.c
27402 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
27403 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
27404 lanai_endtx(lanai, lvcc);
27405 lanai_free_skb(lvcc->tx.atmvcc, skb);
27406 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
27407 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
27408 }
27409
27410 /* Try to fill the buffer - don't call unless there is backlog */
27411 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
27412 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
27413 __net_timestamp(skb);
27414 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
27415 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
27416 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
27417 out:
27418 lvcc->rx.buf.ptr = end;
27419 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
27420 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27421 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
27422 "vcc %d\n", lanai->number, (unsigned int) s, vci);
27423 lanai->stats.service_rxnotaal5++;
27424 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27425 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27426 return 0;
27427 }
27428 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
27429 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27430 int bytes;
27431 read_unlock(&vcc_sklist_lock);
27432 DPRINTK("got trashed rx pdu on vci %d\n", vci);
27433 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27434 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27435 lvcc->stats.x.aal5.service_trash++;
27436 bytes = (SERVICE_GET_END(s) * 16) -
27437 (((unsigned long) lvcc->rx.buf.ptr) -
27438 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27439 }
27440 if (s & SERVICE_STREAM) {
27441 read_unlock(&vcc_sklist_lock);
27442 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27443 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27444 lvcc->stats.x.aal5.service_stream++;
27445 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
27446 "PDU on VCI %d!\n", lanai->number, vci);
27447 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
27448 return 0;
27449 }
27450 DPRINTK("got rx crc error on vci %d\n", vci);
27451 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
27452 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
27453 lvcc->stats.x.aal5.service_rxcrc++;
27454 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
27455 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
27456 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
27457 index 1c70c45..300718d 100644
27458 --- a/drivers/atm/nicstar.c
27459 +++ b/drivers/atm/nicstar.c
27460 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27461 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
27462 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
27463 card->index);
27464 - atomic_inc(&vcc->stats->tx_err);
27465 + atomic_inc_unchecked(&vcc->stats->tx_err);
27466 dev_kfree_skb_any(skb);
27467 return -EINVAL;
27468 }
27469 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27470 if (!vc->tx) {
27471 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
27472 card->index);
27473 - atomic_inc(&vcc->stats->tx_err);
27474 + atomic_inc_unchecked(&vcc->stats->tx_err);
27475 dev_kfree_skb_any(skb);
27476 return -EINVAL;
27477 }
27478 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27479 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
27480 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
27481 card->index);
27482 - atomic_inc(&vcc->stats->tx_err);
27483 + atomic_inc_unchecked(&vcc->stats->tx_err);
27484 dev_kfree_skb_any(skb);
27485 return -EINVAL;
27486 }
27487
27488 if (skb_shinfo(skb)->nr_frags != 0) {
27489 printk("nicstar%d: No scatter-gather yet.\n", card->index);
27490 - atomic_inc(&vcc->stats->tx_err);
27491 + atomic_inc_unchecked(&vcc->stats->tx_err);
27492 dev_kfree_skb_any(skb);
27493 return -EINVAL;
27494 }
27495 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
27496 }
27497
27498 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
27499 - atomic_inc(&vcc->stats->tx_err);
27500 + atomic_inc_unchecked(&vcc->stats->tx_err);
27501 dev_kfree_skb_any(skb);
27502 return -EIO;
27503 }
27504 - atomic_inc(&vcc->stats->tx);
27505 + atomic_inc_unchecked(&vcc->stats->tx);
27506
27507 return 0;
27508 }
27509 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27510 printk
27511 ("nicstar%d: Can't allocate buffers for aal0.\n",
27512 card->index);
27513 - atomic_add(i, &vcc->stats->rx_drop);
27514 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
27515 break;
27516 }
27517 if (!atm_charge(vcc, sb->truesize)) {
27518 RXPRINTK
27519 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
27520 card->index);
27521 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27522 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
27523 dev_kfree_skb_any(sb);
27524 break;
27525 }
27526 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27527 ATM_SKB(sb)->vcc = vcc;
27528 __net_timestamp(sb);
27529 vcc->push(vcc, sb);
27530 - atomic_inc(&vcc->stats->rx);
27531 + atomic_inc_unchecked(&vcc->stats->rx);
27532 cell += ATM_CELL_PAYLOAD;
27533 }
27534
27535 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27536 if (iovb == NULL) {
27537 printk("nicstar%d: Out of iovec buffers.\n",
27538 card->index);
27539 - atomic_inc(&vcc->stats->rx_drop);
27540 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27541 recycle_rx_buf(card, skb);
27542 return;
27543 }
27544 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27545 small or large buffer itself. */
27546 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
27547 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
27548 - atomic_inc(&vcc->stats->rx_err);
27549 + atomic_inc_unchecked(&vcc->stats->rx_err);
27550 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27551 NS_MAX_IOVECS);
27552 NS_PRV_IOVCNT(iovb) = 0;
27553 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27554 ("nicstar%d: Expected a small buffer, and this is not one.\n",
27555 card->index);
27556 which_list(card, skb);
27557 - atomic_inc(&vcc->stats->rx_err);
27558 + atomic_inc_unchecked(&vcc->stats->rx_err);
27559 recycle_rx_buf(card, skb);
27560 vc->rx_iov = NULL;
27561 recycle_iov_buf(card, iovb);
27562 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27563 ("nicstar%d: Expected a large buffer, and this is not one.\n",
27564 card->index);
27565 which_list(card, skb);
27566 - atomic_inc(&vcc->stats->rx_err);
27567 + atomic_inc_unchecked(&vcc->stats->rx_err);
27568 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27569 NS_PRV_IOVCNT(iovb));
27570 vc->rx_iov = NULL;
27571 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27572 printk(" - PDU size mismatch.\n");
27573 else
27574 printk(".\n");
27575 - atomic_inc(&vcc->stats->rx_err);
27576 + atomic_inc_unchecked(&vcc->stats->rx_err);
27577 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
27578 NS_PRV_IOVCNT(iovb));
27579 vc->rx_iov = NULL;
27580 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27581 /* skb points to a small buffer */
27582 if (!atm_charge(vcc, skb->truesize)) {
27583 push_rxbufs(card, skb);
27584 - atomic_inc(&vcc->stats->rx_drop);
27585 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27586 } else {
27587 skb_put(skb, len);
27588 dequeue_sm_buf(card, skb);
27589 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27590 ATM_SKB(skb)->vcc = vcc;
27591 __net_timestamp(skb);
27592 vcc->push(vcc, skb);
27593 - atomic_inc(&vcc->stats->rx);
27594 + atomic_inc_unchecked(&vcc->stats->rx);
27595 }
27596 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
27597 struct sk_buff *sb;
27598 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27599 if (len <= NS_SMBUFSIZE) {
27600 if (!atm_charge(vcc, sb->truesize)) {
27601 push_rxbufs(card, sb);
27602 - atomic_inc(&vcc->stats->rx_drop);
27603 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27604 } else {
27605 skb_put(sb, len);
27606 dequeue_sm_buf(card, sb);
27607 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27608 ATM_SKB(sb)->vcc = vcc;
27609 __net_timestamp(sb);
27610 vcc->push(vcc, sb);
27611 - atomic_inc(&vcc->stats->rx);
27612 + atomic_inc_unchecked(&vcc->stats->rx);
27613 }
27614
27615 push_rxbufs(card, skb);
27616 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27617
27618 if (!atm_charge(vcc, skb->truesize)) {
27619 push_rxbufs(card, skb);
27620 - atomic_inc(&vcc->stats->rx_drop);
27621 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27622 } else {
27623 dequeue_lg_buf(card, skb);
27624 #ifdef NS_USE_DESTRUCTORS
27625 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27626 ATM_SKB(skb)->vcc = vcc;
27627 __net_timestamp(skb);
27628 vcc->push(vcc, skb);
27629 - atomic_inc(&vcc->stats->rx);
27630 + atomic_inc_unchecked(&vcc->stats->rx);
27631 }
27632
27633 push_rxbufs(card, sb);
27634 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27635 printk
27636 ("nicstar%d: Out of huge buffers.\n",
27637 card->index);
27638 - atomic_inc(&vcc->stats->rx_drop);
27639 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27640 recycle_iovec_rx_bufs(card,
27641 (struct iovec *)
27642 iovb->data,
27643 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27644 card->hbpool.count++;
27645 } else
27646 dev_kfree_skb_any(hb);
27647 - atomic_inc(&vcc->stats->rx_drop);
27648 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27649 } else {
27650 /* Copy the small buffer to the huge buffer */
27651 sb = (struct sk_buff *)iov->iov_base;
27652 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
27653 #endif /* NS_USE_DESTRUCTORS */
27654 __net_timestamp(hb);
27655 vcc->push(vcc, hb);
27656 - atomic_inc(&vcc->stats->rx);
27657 + atomic_inc_unchecked(&vcc->stats->rx);
27658 }
27659 }
27660
27661 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
27662 index 5d1d076..12fbca4 100644
27663 --- a/drivers/atm/solos-pci.c
27664 +++ b/drivers/atm/solos-pci.c
27665 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
27666 }
27667 atm_charge(vcc, skb->truesize);
27668 vcc->push(vcc, skb);
27669 - atomic_inc(&vcc->stats->rx);
27670 + atomic_inc_unchecked(&vcc->stats->rx);
27671 break;
27672
27673 case PKT_STATUS:
27674 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
27675 vcc = SKB_CB(oldskb)->vcc;
27676
27677 if (vcc) {
27678 - atomic_inc(&vcc->stats->tx);
27679 + atomic_inc_unchecked(&vcc->stats->tx);
27680 solos_pop(vcc, oldskb);
27681 } else
27682 dev_kfree_skb_irq(oldskb);
27683 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
27684 index 90f1ccc..04c4a1e 100644
27685 --- a/drivers/atm/suni.c
27686 +++ b/drivers/atm/suni.c
27687 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
27688
27689
27690 #define ADD_LIMITED(s,v) \
27691 - atomic_add((v),&stats->s); \
27692 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
27693 + atomic_add_unchecked((v),&stats->s); \
27694 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
27695
27696
27697 static void suni_hz(unsigned long from_timer)
27698 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
27699 index 5120a96..e2572bd 100644
27700 --- a/drivers/atm/uPD98402.c
27701 +++ b/drivers/atm/uPD98402.c
27702 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
27703 struct sonet_stats tmp;
27704 int error = 0;
27705
27706 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27707 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
27708 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
27709 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
27710 if (zero && !error) {
27711 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
27712
27713
27714 #define ADD_LIMITED(s,v) \
27715 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
27716 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
27717 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27718 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
27719 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
27720 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
27721
27722
27723 static void stat_event(struct atm_dev *dev)
27724 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
27725 if (reason & uPD98402_INT_PFM) stat_event(dev);
27726 if (reason & uPD98402_INT_PCO) {
27727 (void) GET(PCOCR); /* clear interrupt cause */
27728 - atomic_add(GET(HECCT),
27729 + atomic_add_unchecked(GET(HECCT),
27730 &PRIV(dev)->sonet_stats.uncorr_hcs);
27731 }
27732 if ((reason & uPD98402_INT_RFO) &&
27733 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
27734 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27735 uPD98402_INT_LOS),PIMR); /* enable them */
27736 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27737 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27738 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27739 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27740 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27741 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27742 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27743 return 0;
27744 }
27745
27746 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
27747 index d889f56..17eb71e 100644
27748 --- a/drivers/atm/zatm.c
27749 +++ b/drivers/atm/zatm.c
27750 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27751 }
27752 if (!size) {
27753 dev_kfree_skb_irq(skb);
27754 - if (vcc) atomic_inc(&vcc->stats->rx_err);
27755 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27756 continue;
27757 }
27758 if (!atm_charge(vcc,skb->truesize)) {
27759 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
27760 skb->len = size;
27761 ATM_SKB(skb)->vcc = vcc;
27762 vcc->push(vcc,skb);
27763 - atomic_inc(&vcc->stats->rx);
27764 + atomic_inc_unchecked(&vcc->stats->rx);
27765 }
27766 zout(pos & 0xffff,MTA(mbx));
27767 #if 0 /* probably a stupid idea */
27768 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
27769 skb_queue_head(&zatm_vcc->backlog,skb);
27770 break;
27771 }
27772 - atomic_inc(&vcc->stats->tx);
27773 + atomic_inc_unchecked(&vcc->stats->tx);
27774 wake_up(&zatm_vcc->tx_wait);
27775 }
27776
27777 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
27778 index a4760e0..51283cf 100644
27779 --- a/drivers/base/devtmpfs.c
27780 +++ b/drivers/base/devtmpfs.c
27781 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
27782 if (!thread)
27783 return 0;
27784
27785 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
27786 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
27787 if (err)
27788 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
27789 else
27790 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
27791 index caf995f..6f76697 100644
27792 --- a/drivers/base/power/wakeup.c
27793 +++ b/drivers/base/power/wakeup.c
27794 @@ -30,14 +30,14 @@ bool events_check_enabled;
27795 * They need to be modified together atomically, so it's better to use one
27796 * atomic variable to hold them both.
27797 */
27798 -static atomic_t combined_event_count = ATOMIC_INIT(0);
27799 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
27800
27801 #define IN_PROGRESS_BITS (sizeof(int) * 4)
27802 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
27803
27804 static void split_counters(unsigned int *cnt, unsigned int *inpr)
27805 {
27806 - unsigned int comb = atomic_read(&combined_event_count);
27807 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
27808
27809 *cnt = (comb >> IN_PROGRESS_BITS);
27810 *inpr = comb & MAX_IN_PROGRESS;
27811 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
27812 ws->last_time = ktime_get();
27813
27814 /* Increment the counter of events in progress. */
27815 - atomic_inc(&combined_event_count);
27816 + atomic_inc_unchecked(&combined_event_count);
27817 }
27818
27819 /**
27820 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
27821 * Increment the counter of registered wakeup events and decrement the
27822 * couter of wakeup events in progress simultaneously.
27823 */
27824 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
27825 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
27826 }
27827
27828 /**
27829 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
27830 index b0f553b..77b928b 100644
27831 --- a/drivers/block/cciss.c
27832 +++ b/drivers/block/cciss.c
27833 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
27834 int err;
27835 u32 cp;
27836
27837 + memset(&arg64, 0, sizeof(arg64));
27838 +
27839 err = 0;
27840 err |=
27841 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27842 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
27843 while (!list_empty(&h->reqQ)) {
27844 c = list_entry(h->reqQ.next, CommandList_struct, list);
27845 /* can't do anything if fifo is full */
27846 - if ((h->access.fifo_full(h))) {
27847 + if ((h->access->fifo_full(h))) {
27848 dev_warn(&h->pdev->dev, "fifo full\n");
27849 break;
27850 }
27851 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
27852 h->Qdepth--;
27853
27854 /* Tell the controller execute command */
27855 - h->access.submit_command(h, c);
27856 + h->access->submit_command(h, c);
27857
27858 /* Put job onto the completed Q */
27859 addQ(&h->cmpQ, c);
27860 @@ -3443,17 +3445,17 @@ startio:
27861
27862 static inline unsigned long get_next_completion(ctlr_info_t *h)
27863 {
27864 - return h->access.command_completed(h);
27865 + return h->access->command_completed(h);
27866 }
27867
27868 static inline int interrupt_pending(ctlr_info_t *h)
27869 {
27870 - return h->access.intr_pending(h);
27871 + return h->access->intr_pending(h);
27872 }
27873
27874 static inline long interrupt_not_for_us(ctlr_info_t *h)
27875 {
27876 - return ((h->access.intr_pending(h) == 0) ||
27877 + return ((h->access->intr_pending(h) == 0) ||
27878 (h->interrupts_enabled == 0));
27879 }
27880
27881 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
27882 u32 a;
27883
27884 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
27885 - return h->access.command_completed(h);
27886 + return h->access->command_completed(h);
27887
27888 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
27889 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
27890 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
27891 trans_support & CFGTBL_Trans_use_short_tags);
27892
27893 /* Change the access methods to the performant access methods */
27894 - h->access = SA5_performant_access;
27895 + h->access = &SA5_performant_access;
27896 h->transMethod = CFGTBL_Trans_Performant;
27897
27898 return;
27899 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
27900 if (prod_index < 0)
27901 return -ENODEV;
27902 h->product_name = products[prod_index].product_name;
27903 - h->access = *(products[prod_index].access);
27904 + h->access = products[prod_index].access;
27905
27906 if (cciss_board_disabled(h)) {
27907 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
27908 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
27909 }
27910
27911 /* make sure the board interrupts are off */
27912 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27913 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27914 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
27915 if (rc)
27916 goto clean2;
27917 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
27918 * fake ones to scoop up any residual completions.
27919 */
27920 spin_lock_irqsave(&h->lock, flags);
27921 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27922 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27923 spin_unlock_irqrestore(&h->lock, flags);
27924 free_irq(h->intr[h->intr_mode], h);
27925 rc = cciss_request_irq(h, cciss_msix_discard_completions,
27926 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
27927 dev_info(&h->pdev->dev, "Board READY.\n");
27928 dev_info(&h->pdev->dev,
27929 "Waiting for stale completions to drain.\n");
27930 - h->access.set_intr_mask(h, CCISS_INTR_ON);
27931 + h->access->set_intr_mask(h, CCISS_INTR_ON);
27932 msleep(10000);
27933 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27934 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27935
27936 rc = controller_reset_failed(h->cfgtable);
27937 if (rc)
27938 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
27939 cciss_scsi_setup(h);
27940
27941 /* Turn the interrupts on so we can service requests */
27942 - h->access.set_intr_mask(h, CCISS_INTR_ON);
27943 + h->access->set_intr_mask(h, CCISS_INTR_ON);
27944
27945 /* Get the firmware version */
27946 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27947 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
27948 kfree(flush_buf);
27949 if (return_code != IO_OK)
27950 dev_warn(&h->pdev->dev, "Error flushing cache\n");
27951 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
27952 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
27953 free_irq(h->intr[h->intr_mode], h);
27954 }
27955
27956 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
27957 index 7fda30e..eb5dfe0 100644
27958 --- a/drivers/block/cciss.h
27959 +++ b/drivers/block/cciss.h
27960 @@ -101,7 +101,7 @@ struct ctlr_info
27961 /* information about each logical volume */
27962 drive_info_struct *drv[CISS_MAX_LUN];
27963
27964 - struct access_method access;
27965 + struct access_method *access;
27966
27967 /* queue and queue Info */
27968 struct list_head reqQ;
27969 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
27970 index 9125bbe..eede5c8 100644
27971 --- a/drivers/block/cpqarray.c
27972 +++ b/drivers/block/cpqarray.c
27973 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
27974 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27975 goto Enomem4;
27976 }
27977 - hba[i]->access.set_intr_mask(hba[i], 0);
27978 + hba[i]->access->set_intr_mask(hba[i], 0);
27979 if (request_irq(hba[i]->intr, do_ida_intr,
27980 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27981 {
27982 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
27983 add_timer(&hba[i]->timer);
27984
27985 /* Enable IRQ now that spinlock and rate limit timer are set up */
27986 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27987 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27988
27989 for(j=0; j<NWD; j++) {
27990 struct gendisk *disk = ida_gendisk[i][j];
27991 @@ -694,7 +694,7 @@ DBGINFO(
27992 for(i=0; i<NR_PRODUCTS; i++) {
27993 if (board_id == products[i].board_id) {
27994 c->product_name = products[i].product_name;
27995 - c->access = *(products[i].access);
27996 + c->access = products[i].access;
27997 break;
27998 }
27999 }
28000 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28001 hba[ctlr]->intr = intr;
28002 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28003 hba[ctlr]->product_name = products[j].product_name;
28004 - hba[ctlr]->access = *(products[j].access);
28005 + hba[ctlr]->access = products[j].access;
28006 hba[ctlr]->ctlr = ctlr;
28007 hba[ctlr]->board_id = board_id;
28008 hba[ctlr]->pci_dev = NULL; /* not PCI */
28009 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28010
28011 while((c = h->reqQ) != NULL) {
28012 /* Can't do anything if we're busy */
28013 - if (h->access.fifo_full(h) == 0)
28014 + if (h->access->fifo_full(h) == 0)
28015 return;
28016
28017 /* Get the first entry from the request Q */
28018 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28019 h->Qdepth--;
28020
28021 /* Tell the controller to do our bidding */
28022 - h->access.submit_command(h, c);
28023 + h->access->submit_command(h, c);
28024
28025 /* Get onto the completion Q */
28026 addQ(&h->cmpQ, c);
28027 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28028 unsigned long flags;
28029 __u32 a,a1;
28030
28031 - istat = h->access.intr_pending(h);
28032 + istat = h->access->intr_pending(h);
28033 /* Is this interrupt for us? */
28034 if (istat == 0)
28035 return IRQ_NONE;
28036 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28037 */
28038 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28039 if (istat & FIFO_NOT_EMPTY) {
28040 - while((a = h->access.command_completed(h))) {
28041 + while((a = h->access->command_completed(h))) {
28042 a1 = a; a &= ~3;
28043 if ((c = h->cmpQ) == NULL)
28044 {
28045 @@ -1449,11 +1449,11 @@ static int sendcmd(
28046 /*
28047 * Disable interrupt
28048 */
28049 - info_p->access.set_intr_mask(info_p, 0);
28050 + info_p->access->set_intr_mask(info_p, 0);
28051 /* Make sure there is room in the command FIFO */
28052 /* Actually it should be completely empty at this time. */
28053 for (i = 200000; i > 0; i--) {
28054 - temp = info_p->access.fifo_full(info_p);
28055 + temp = info_p->access->fifo_full(info_p);
28056 if (temp != 0) {
28057 break;
28058 }
28059 @@ -1466,7 +1466,7 @@ DBG(
28060 /*
28061 * Send the cmd
28062 */
28063 - info_p->access.submit_command(info_p, c);
28064 + info_p->access->submit_command(info_p, c);
28065 complete = pollcomplete(ctlr);
28066
28067 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28068 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28069 * we check the new geometry. Then turn interrupts back on when
28070 * we're done.
28071 */
28072 - host->access.set_intr_mask(host, 0);
28073 + host->access->set_intr_mask(host, 0);
28074 getgeometry(ctlr);
28075 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28076 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28077
28078 for(i=0; i<NWD; i++) {
28079 struct gendisk *disk = ida_gendisk[ctlr][i];
28080 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28081 /* Wait (up to 2 seconds) for a command to complete */
28082
28083 for (i = 200000; i > 0; i--) {
28084 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
28085 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
28086 if (done == 0) {
28087 udelay(10); /* a short fixed delay */
28088 } else
28089 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28090 index be73e9d..7fbf140 100644
28091 --- a/drivers/block/cpqarray.h
28092 +++ b/drivers/block/cpqarray.h
28093 @@ -99,7 +99,7 @@ struct ctlr_info {
28094 drv_info_t drv[NWD];
28095 struct proc_dir_entry *proc;
28096
28097 - struct access_method access;
28098 + struct access_method *access;
28099
28100 cmdlist_t *reqQ;
28101 cmdlist_t *cmpQ;
28102 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
28103 index 9cf2035..bffca95 100644
28104 --- a/drivers/block/drbd/drbd_int.h
28105 +++ b/drivers/block/drbd/drbd_int.h
28106 @@ -736,7 +736,7 @@ struct drbd_request;
28107 struct drbd_epoch {
28108 struct list_head list;
28109 unsigned int barrier_nr;
28110 - atomic_t epoch_size; /* increased on every request added. */
28111 + atomic_unchecked_t epoch_size; /* increased on every request added. */
28112 atomic_t active; /* increased on every req. added, and dec on every finished. */
28113 unsigned long flags;
28114 };
28115 @@ -1108,7 +1108,7 @@ struct drbd_conf {
28116 void *int_dig_in;
28117 void *int_dig_vv;
28118 wait_queue_head_t seq_wait;
28119 - atomic_t packet_seq;
28120 + atomic_unchecked_t packet_seq;
28121 unsigned int peer_seq;
28122 spinlock_t peer_seq_lock;
28123 unsigned int minor;
28124 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
28125
28126 static inline void drbd_tcp_cork(struct socket *sock)
28127 {
28128 - int __user val = 1;
28129 + int val = 1;
28130 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28131 - (char __user *)&val, sizeof(val));
28132 + (char __force_user *)&val, sizeof(val));
28133 }
28134
28135 static inline void drbd_tcp_uncork(struct socket *sock)
28136 {
28137 - int __user val = 0;
28138 + int val = 0;
28139 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28140 - (char __user *)&val, sizeof(val));
28141 + (char __force_user *)&val, sizeof(val));
28142 }
28143
28144 static inline void drbd_tcp_nodelay(struct socket *sock)
28145 {
28146 - int __user val = 1;
28147 + int val = 1;
28148 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
28149 - (char __user *)&val, sizeof(val));
28150 + (char __force_user *)&val, sizeof(val));
28151 }
28152
28153 static inline void drbd_tcp_quickack(struct socket *sock)
28154 {
28155 - int __user val = 2;
28156 + int val = 2;
28157 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
28158 - (char __user *)&val, sizeof(val));
28159 + (char __force_user *)&val, sizeof(val));
28160 }
28161
28162 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
28163 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
28164 index 0358e55..bc33689 100644
28165 --- a/drivers/block/drbd/drbd_main.c
28166 +++ b/drivers/block/drbd/drbd_main.c
28167 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
28168 p.sector = sector;
28169 p.block_id = block_id;
28170 p.blksize = blksize;
28171 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
28172 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
28173
28174 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
28175 return false;
28176 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
28177 p.sector = cpu_to_be64(req->sector);
28178 p.block_id = (unsigned long)req;
28179 p.seq_num = cpu_to_be32(req->seq_num =
28180 - atomic_add_return(1, &mdev->packet_seq));
28181 + atomic_add_return_unchecked(1, &mdev->packet_seq));
28182
28183 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
28184
28185 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
28186 atomic_set(&mdev->unacked_cnt, 0);
28187 atomic_set(&mdev->local_cnt, 0);
28188 atomic_set(&mdev->net_cnt, 0);
28189 - atomic_set(&mdev->packet_seq, 0);
28190 + atomic_set_unchecked(&mdev->packet_seq, 0);
28191 atomic_set(&mdev->pp_in_use, 0);
28192 atomic_set(&mdev->pp_in_use_by_net, 0);
28193 atomic_set(&mdev->rs_sect_in, 0);
28194 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
28195 mdev->receiver.t_state);
28196
28197 /* no need to lock it, I'm the only thread alive */
28198 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
28199 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
28200 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
28201 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
28202 mdev->al_writ_cnt =
28203 mdev->bm_writ_cnt =
28204 mdev->read_cnt =
28205 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
28206 index af2a250..219c74b 100644
28207 --- a/drivers/block/drbd/drbd_nl.c
28208 +++ b/drivers/block/drbd/drbd_nl.c
28209 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
28210 module_put(THIS_MODULE);
28211 }
28212
28213 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28214 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28215
28216 static unsigned short *
28217 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
28218 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
28219 cn_reply->id.idx = CN_IDX_DRBD;
28220 cn_reply->id.val = CN_VAL_DRBD;
28221
28222 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28223 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28224 cn_reply->ack = 0; /* not used here. */
28225 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28226 (int)((char *)tl - (char *)reply->tag_list);
28227 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
28228 cn_reply->id.idx = CN_IDX_DRBD;
28229 cn_reply->id.val = CN_VAL_DRBD;
28230
28231 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28232 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28233 cn_reply->ack = 0; /* not used here. */
28234 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28235 (int)((char *)tl - (char *)reply->tag_list);
28236 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
28237 cn_reply->id.idx = CN_IDX_DRBD;
28238 cn_reply->id.val = CN_VAL_DRBD;
28239
28240 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
28241 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
28242 cn_reply->ack = 0; // not used here.
28243 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28244 (int)((char*)tl - (char*)reply->tag_list);
28245 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
28246 cn_reply->id.idx = CN_IDX_DRBD;
28247 cn_reply->id.val = CN_VAL_DRBD;
28248
28249 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28250 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28251 cn_reply->ack = 0; /* not used here. */
28252 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28253 (int)((char *)tl - (char *)reply->tag_list);
28254 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
28255 index 43beaca..4a5b1dd 100644
28256 --- a/drivers/block/drbd/drbd_receiver.c
28257 +++ b/drivers/block/drbd/drbd_receiver.c
28258 @@ -894,7 +894,7 @@ retry:
28259 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
28260 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
28261
28262 - atomic_set(&mdev->packet_seq, 0);
28263 + atomic_set_unchecked(&mdev->packet_seq, 0);
28264 mdev->peer_seq = 0;
28265
28266 drbd_thread_start(&mdev->asender);
28267 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28268 do {
28269 next_epoch = NULL;
28270
28271 - epoch_size = atomic_read(&epoch->epoch_size);
28272 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
28273
28274 switch (ev & ~EV_CLEANUP) {
28275 case EV_PUT:
28276 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28277 rv = FE_DESTROYED;
28278 } else {
28279 epoch->flags = 0;
28280 - atomic_set(&epoch->epoch_size, 0);
28281 + atomic_set_unchecked(&epoch->epoch_size, 0);
28282 /* atomic_set(&epoch->active, 0); is already zero */
28283 if (rv == FE_STILL_LIVE)
28284 rv = FE_RECYCLED;
28285 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28286 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
28287 drbd_flush(mdev);
28288
28289 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28290 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28291 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
28292 if (epoch)
28293 break;
28294 }
28295
28296 epoch = mdev->current_epoch;
28297 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
28298 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
28299
28300 D_ASSERT(atomic_read(&epoch->active) == 0);
28301 D_ASSERT(epoch->flags == 0);
28302 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28303 }
28304
28305 epoch->flags = 0;
28306 - atomic_set(&epoch->epoch_size, 0);
28307 + atomic_set_unchecked(&epoch->epoch_size, 0);
28308 atomic_set(&epoch->active, 0);
28309
28310 spin_lock(&mdev->epoch_lock);
28311 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
28312 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28313 list_add(&epoch->list, &mdev->current_epoch->list);
28314 mdev->current_epoch = epoch;
28315 mdev->epochs++;
28316 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28317 spin_unlock(&mdev->peer_seq_lock);
28318
28319 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
28320 - atomic_inc(&mdev->current_epoch->epoch_size);
28321 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
28322 return drbd_drain_block(mdev, data_size);
28323 }
28324
28325 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28326
28327 spin_lock(&mdev->epoch_lock);
28328 e->epoch = mdev->current_epoch;
28329 - atomic_inc(&e->epoch->epoch_size);
28330 + atomic_inc_unchecked(&e->epoch->epoch_size);
28331 atomic_inc(&e->epoch->active);
28332 spin_unlock(&mdev->epoch_lock);
28333
28334 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
28335 D_ASSERT(list_empty(&mdev->done_ee));
28336
28337 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
28338 - atomic_set(&mdev->current_epoch->epoch_size, 0);
28339 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
28340 D_ASSERT(list_empty(&mdev->current_epoch->list));
28341 }
28342
28343 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
28344 index 1e888c9..05cf1b0 100644
28345 --- a/drivers/block/loop.c
28346 +++ b/drivers/block/loop.c
28347 @@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
28348 mm_segment_t old_fs = get_fs();
28349
28350 set_fs(get_ds());
28351 - bw = file->f_op->write(file, buf, len, &pos);
28352 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
28353 set_fs(old_fs);
28354 if (likely(bw == len))
28355 return 0;
28356 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
28357 index 4364303..9adf4ee 100644
28358 --- a/drivers/char/Kconfig
28359 +++ b/drivers/char/Kconfig
28360 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
28361
28362 config DEVKMEM
28363 bool "/dev/kmem virtual device support"
28364 - default y
28365 + default n
28366 + depends on !GRKERNSEC_KMEM
28367 help
28368 Say Y here if you want to support the /dev/kmem device. The
28369 /dev/kmem device is rarely used, but can be used for certain
28370 @@ -596,6 +597,7 @@ config DEVPORT
28371 bool
28372 depends on !M68K
28373 depends on ISA || PCI
28374 + depends on !GRKERNSEC_KMEM
28375 default y
28376
28377 source "drivers/s390/char/Kconfig"
28378 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
28379 index 2e04433..22afc64 100644
28380 --- a/drivers/char/agp/frontend.c
28381 +++ b/drivers/char/agp/frontend.c
28382 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
28383 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
28384 return -EFAULT;
28385
28386 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
28387 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
28388 return -EFAULT;
28389
28390 client = agp_find_client_by_pid(reserve.pid);
28391 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
28392 index 095ab90..afad0a4 100644
28393 --- a/drivers/char/briq_panel.c
28394 +++ b/drivers/char/briq_panel.c
28395 @@ -9,6 +9,7 @@
28396 #include <linux/types.h>
28397 #include <linux/errno.h>
28398 #include <linux/tty.h>
28399 +#include <linux/mutex.h>
28400 #include <linux/timer.h>
28401 #include <linux/kernel.h>
28402 #include <linux/wait.h>
28403 @@ -34,6 +35,7 @@ static int vfd_is_open;
28404 static unsigned char vfd[40];
28405 static int vfd_cursor;
28406 static unsigned char ledpb, led;
28407 +static DEFINE_MUTEX(vfd_mutex);
28408
28409 static void update_vfd(void)
28410 {
28411 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28412 if (!vfd_is_open)
28413 return -EBUSY;
28414
28415 + mutex_lock(&vfd_mutex);
28416 for (;;) {
28417 char c;
28418 if (!indx)
28419 break;
28420 - if (get_user(c, buf))
28421 + if (get_user(c, buf)) {
28422 + mutex_unlock(&vfd_mutex);
28423 return -EFAULT;
28424 + }
28425 if (esc) {
28426 set_led(c);
28427 esc = 0;
28428 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
28429 buf++;
28430 }
28431 update_vfd();
28432 + mutex_unlock(&vfd_mutex);
28433
28434 return len;
28435 }
28436 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
28437 index f773a9d..65cd683 100644
28438 --- a/drivers/char/genrtc.c
28439 +++ b/drivers/char/genrtc.c
28440 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
28441 switch (cmd) {
28442
28443 case RTC_PLL_GET:
28444 + memset(&pll, 0, sizeof(pll));
28445 if (get_rtc_pll(&pll))
28446 return -EINVAL;
28447 else
28448 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
28449 index 0833896..cccce52 100644
28450 --- a/drivers/char/hpet.c
28451 +++ b/drivers/char/hpet.c
28452 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
28453 }
28454
28455 static int
28456 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
28457 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
28458 struct hpet_info *info)
28459 {
28460 struct hpet_timer __iomem *timer;
28461 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
28462 index 58c0e63..46c16bf 100644
28463 --- a/drivers/char/ipmi/ipmi_msghandler.c
28464 +++ b/drivers/char/ipmi/ipmi_msghandler.c
28465 @@ -415,7 +415,7 @@ struct ipmi_smi {
28466 struct proc_dir_entry *proc_dir;
28467 char proc_dir_name[10];
28468
28469 - atomic_t stats[IPMI_NUM_STATS];
28470 + atomic_unchecked_t stats[IPMI_NUM_STATS];
28471
28472 /*
28473 * run_to_completion duplicate of smb_info, smi_info
28474 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
28475
28476
28477 #define ipmi_inc_stat(intf, stat) \
28478 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
28479 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
28480 #define ipmi_get_stat(intf, stat) \
28481 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
28482 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
28483
28484 static int is_lan_addr(struct ipmi_addr *addr)
28485 {
28486 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
28487 INIT_LIST_HEAD(&intf->cmd_rcvrs);
28488 init_waitqueue_head(&intf->waitq);
28489 for (i = 0; i < IPMI_NUM_STATS; i++)
28490 - atomic_set(&intf->stats[i], 0);
28491 + atomic_set_unchecked(&intf->stats[i], 0);
28492
28493 intf->proc_dir = NULL;
28494
28495 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
28496 index 9397ab4..d01bee1 100644
28497 --- a/drivers/char/ipmi/ipmi_si_intf.c
28498 +++ b/drivers/char/ipmi/ipmi_si_intf.c
28499 @@ -277,7 +277,7 @@ struct smi_info {
28500 unsigned char slave_addr;
28501
28502 /* Counters and things for the proc filesystem. */
28503 - atomic_t stats[SI_NUM_STATS];
28504 + atomic_unchecked_t stats[SI_NUM_STATS];
28505
28506 struct task_struct *thread;
28507
28508 @@ -286,9 +286,9 @@ struct smi_info {
28509 };
28510
28511 #define smi_inc_stat(smi, stat) \
28512 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
28513 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
28514 #define smi_get_stat(smi, stat) \
28515 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
28516 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
28517
28518 #define SI_MAX_PARMS 4
28519
28520 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
28521 atomic_set(&new_smi->req_events, 0);
28522 new_smi->run_to_completion = 0;
28523 for (i = 0; i < SI_NUM_STATS; i++)
28524 - atomic_set(&new_smi->stats[i], 0);
28525 + atomic_set_unchecked(&new_smi->stats[i], 0);
28526
28527 new_smi->interrupt_disabled = 1;
28528 atomic_set(&new_smi->stop_operation, 0);
28529 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
28530 index 1aeaaba..e018570 100644
28531 --- a/drivers/char/mbcs.c
28532 +++ b/drivers/char/mbcs.c
28533 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
28534 return 0;
28535 }
28536
28537 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
28538 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
28539 {
28540 .part_num = MBCS_PART_NUM,
28541 .mfg_num = MBCS_MFG_NUM,
28542 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
28543 index 1451790..f705c30 100644
28544 --- a/drivers/char/mem.c
28545 +++ b/drivers/char/mem.c
28546 @@ -18,6 +18,7 @@
28547 #include <linux/raw.h>
28548 #include <linux/tty.h>
28549 #include <linux/capability.h>
28550 +#include <linux/security.h>
28551 #include <linux/ptrace.h>
28552 #include <linux/device.h>
28553 #include <linux/highmem.h>
28554 @@ -35,6 +36,10 @@
28555 # include <linux/efi.h>
28556 #endif
28557
28558 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28559 +extern const struct file_operations grsec_fops;
28560 +#endif
28561 +
28562 static inline unsigned long size_inside_page(unsigned long start,
28563 unsigned long size)
28564 {
28565 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28566
28567 while (cursor < to) {
28568 if (!devmem_is_allowed(pfn)) {
28569 +#ifdef CONFIG_GRKERNSEC_KMEM
28570 + gr_handle_mem_readwrite(from, to);
28571 +#else
28572 printk(KERN_INFO
28573 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
28574 current->comm, from, to);
28575 +#endif
28576 return 0;
28577 }
28578 cursor += PAGE_SIZE;
28579 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28580 }
28581 return 1;
28582 }
28583 +#elif defined(CONFIG_GRKERNSEC_KMEM)
28584 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28585 +{
28586 + return 0;
28587 +}
28588 #else
28589 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28590 {
28591 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28592
28593 while (count > 0) {
28594 unsigned long remaining;
28595 + char *temp;
28596
28597 sz = size_inside_page(p, count);
28598
28599 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
28600 if (!ptr)
28601 return -EFAULT;
28602
28603 - remaining = copy_to_user(buf, ptr, sz);
28604 +#ifdef CONFIG_PAX_USERCOPY
28605 + temp = kmalloc(sz, GFP_KERNEL);
28606 + if (!temp) {
28607 + unxlate_dev_mem_ptr(p, ptr);
28608 + return -ENOMEM;
28609 + }
28610 + memcpy(temp, ptr, sz);
28611 +#else
28612 + temp = ptr;
28613 +#endif
28614 +
28615 + remaining = copy_to_user(buf, temp, sz);
28616 +
28617 +#ifdef CONFIG_PAX_USERCOPY
28618 + kfree(temp);
28619 +#endif
28620 +
28621 unxlate_dev_mem_ptr(p, ptr);
28622 if (remaining)
28623 return -EFAULT;
28624 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28625 size_t count, loff_t *ppos)
28626 {
28627 unsigned long p = *ppos;
28628 - ssize_t low_count, read, sz;
28629 + ssize_t low_count, read, sz, err = 0;
28630 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28631 - int err = 0;
28632
28633 read = 0;
28634 if (p < (unsigned long) high_memory) {
28635 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28636 }
28637 #endif
28638 while (low_count > 0) {
28639 + char *temp;
28640 +
28641 sz = size_inside_page(p, low_count);
28642
28643 /*
28644 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
28645 */
28646 kbuf = xlate_dev_kmem_ptr((char *)p);
28647
28648 - if (copy_to_user(buf, kbuf, sz))
28649 +#ifdef CONFIG_PAX_USERCOPY
28650 + temp = kmalloc(sz, GFP_KERNEL);
28651 + if (!temp)
28652 + return -ENOMEM;
28653 + memcpy(temp, kbuf, sz);
28654 +#else
28655 + temp = kbuf;
28656 +#endif
28657 +
28658 + err = copy_to_user(buf, temp, sz);
28659 +
28660 +#ifdef CONFIG_PAX_USERCOPY
28661 + kfree(temp);
28662 +#endif
28663 +
28664 + if (err)
28665 return -EFAULT;
28666 buf += sz;
28667 p += sz;
28668 @@ -867,6 +914,9 @@ static const struct memdev {
28669 #ifdef CONFIG_CRASH_DUMP
28670 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28671 #endif
28672 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28673 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28674 +#endif
28675 };
28676
28677 static int memory_open(struct inode *inode, struct file *filp)
28678 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
28679 index da3cfee..a5a6606 100644
28680 --- a/drivers/char/nvram.c
28681 +++ b/drivers/char/nvram.c
28682 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
28683
28684 spin_unlock_irq(&rtc_lock);
28685
28686 - if (copy_to_user(buf, contents, tmp - contents))
28687 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
28688 return -EFAULT;
28689
28690 *ppos = i;
28691 diff --git a/drivers/char/random.c b/drivers/char/random.c
28692 index 6035ab8..bdfe4fd 100644
28693 --- a/drivers/char/random.c
28694 +++ b/drivers/char/random.c
28695 @@ -261,8 +261,13 @@
28696 /*
28697 * Configuration information
28698 */
28699 +#ifdef CONFIG_GRKERNSEC_RANDNET
28700 +#define INPUT_POOL_WORDS 512
28701 +#define OUTPUT_POOL_WORDS 128
28702 +#else
28703 #define INPUT_POOL_WORDS 128
28704 #define OUTPUT_POOL_WORDS 32
28705 +#endif
28706 #define SEC_XFER_SIZE 512
28707 #define EXTRACT_SIZE 10
28708
28709 @@ -300,10 +305,17 @@ static struct poolinfo {
28710 int poolwords;
28711 int tap1, tap2, tap3, tap4, tap5;
28712 } poolinfo_table[] = {
28713 +#ifdef CONFIG_GRKERNSEC_RANDNET
28714 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28715 + { 512, 411, 308, 208, 104, 1 },
28716 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28717 + { 128, 103, 76, 51, 25, 1 },
28718 +#else
28719 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28720 { 128, 103, 76, 51, 25, 1 },
28721 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28722 { 32, 26, 20, 14, 7, 1 },
28723 +#endif
28724 #if 0
28725 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28726 { 2048, 1638, 1231, 819, 411, 1 },
28727 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
28728
28729 extract_buf(r, tmp);
28730 i = min_t(int, nbytes, EXTRACT_SIZE);
28731 - if (copy_to_user(buf, tmp, i)) {
28732 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
28733 ret = -EFAULT;
28734 break;
28735 }
28736 @@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28737 #include <linux/sysctl.h>
28738
28739 static int min_read_thresh = 8, min_write_thresh;
28740 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
28741 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28742 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28743 static char sysctl_bootid[16];
28744
28745 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
28746 index 1ee8ce7..b778bef 100644
28747 --- a/drivers/char/sonypi.c
28748 +++ b/drivers/char/sonypi.c
28749 @@ -55,6 +55,7 @@
28750 #include <asm/uaccess.h>
28751 #include <asm/io.h>
28752 #include <asm/system.h>
28753 +#include <asm/local.h>
28754
28755 #include <linux/sonypi.h>
28756
28757 @@ -491,7 +492,7 @@ static struct sonypi_device {
28758 spinlock_t fifo_lock;
28759 wait_queue_head_t fifo_proc_list;
28760 struct fasync_struct *fifo_async;
28761 - int open_count;
28762 + local_t open_count;
28763 int model;
28764 struct input_dev *input_jog_dev;
28765 struct input_dev *input_key_dev;
28766 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
28767 static int sonypi_misc_release(struct inode *inode, struct file *file)
28768 {
28769 mutex_lock(&sonypi_device.lock);
28770 - sonypi_device.open_count--;
28771 + local_dec(&sonypi_device.open_count);
28772 mutex_unlock(&sonypi_device.lock);
28773 return 0;
28774 }
28775 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
28776 {
28777 mutex_lock(&sonypi_device.lock);
28778 /* Flush input queue on first open */
28779 - if (!sonypi_device.open_count)
28780 + if (!local_read(&sonypi_device.open_count))
28781 kfifo_reset(&sonypi_device.fifo);
28782 - sonypi_device.open_count++;
28783 + local_inc(&sonypi_device.open_count);
28784 mutex_unlock(&sonypi_device.lock);
28785
28786 return 0;
28787 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
28788 index 361a1df..2471eee 100644
28789 --- a/drivers/char/tpm/tpm.c
28790 +++ b/drivers/char/tpm/tpm.c
28791 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
28792 chip->vendor.req_complete_val)
28793 goto out_recv;
28794
28795 - if ((status == chip->vendor.req_canceled)) {
28796 + if (status == chip->vendor.req_canceled) {
28797 dev_err(chip->dev, "Operation Canceled\n");
28798 rc = -ECANCELED;
28799 goto out;
28800 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
28801 index 0636520..169c1d0 100644
28802 --- a/drivers/char/tpm/tpm_bios.c
28803 +++ b/drivers/char/tpm/tpm_bios.c
28804 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
28805 event = addr;
28806
28807 if ((event->event_type == 0 && event->event_size == 0) ||
28808 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28809 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28810 return NULL;
28811
28812 return addr;
28813 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
28814 return NULL;
28815
28816 if ((event->event_type == 0 && event->event_size == 0) ||
28817 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28818 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28819 return NULL;
28820
28821 (*pos)++;
28822 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
28823 int i;
28824
28825 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28826 - seq_putc(m, data[i]);
28827 + if (!seq_putc(m, data[i]))
28828 + return -EFAULT;
28829
28830 return 0;
28831 }
28832 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
28833 log->bios_event_log_end = log->bios_event_log + len;
28834
28835 virt = acpi_os_map_memory(start, len);
28836 + if (!virt) {
28837 + kfree(log->bios_event_log);
28838 + log->bios_event_log = NULL;
28839 + return -EFAULT;
28840 + }
28841
28842 - memcpy(log->bios_event_log, virt, len);
28843 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
28844
28845 acpi_os_unmap_memory(virt, len);
28846 return 0;
28847 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
28848 index 8e3c46d..c139b99 100644
28849 --- a/drivers/char/virtio_console.c
28850 +++ b/drivers/char/virtio_console.c
28851 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
28852 if (to_user) {
28853 ssize_t ret;
28854
28855 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
28856 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
28857 if (ret)
28858 return -EFAULT;
28859 } else {
28860 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
28861 if (!port_has_data(port) && !port->host_connected)
28862 return 0;
28863
28864 - return fill_readbuf(port, ubuf, count, true);
28865 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
28866 }
28867
28868 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
28869 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
28870 index eb1d864..39ee5a7 100644
28871 --- a/drivers/dma/dmatest.c
28872 +++ b/drivers/dma/dmatest.c
28873 @@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
28874 }
28875 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
28876 cnt = dmatest_add_threads(dtc, DMA_PQ);
28877 - thread_count += cnt > 0 ?: 0;
28878 + thread_count += cnt > 0 ? cnt : 0;
28879 }
28880
28881 pr_info("dmatest: Started %u threads using %s\n",
28882 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
28883 index c9eee6d..f9d5280 100644
28884 --- a/drivers/edac/amd64_edac.c
28885 +++ b/drivers/edac/amd64_edac.c
28886 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
28887 * PCI core identifies what devices are on a system during boot, and then
28888 * inquiry this table to see if this driver is for a given device found.
28889 */
28890 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
28891 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
28892 {
28893 .vendor = PCI_VENDOR_ID_AMD,
28894 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
28895 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
28896 index e47e73b..348e0bd 100644
28897 --- a/drivers/edac/amd76x_edac.c
28898 +++ b/drivers/edac/amd76x_edac.c
28899 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
28900 edac_mc_free(mci);
28901 }
28902
28903 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
28904 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
28905 {
28906 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28907 AMD762},
28908 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
28909 index 1af531a..3a8ff27 100644
28910 --- a/drivers/edac/e752x_edac.c
28911 +++ b/drivers/edac/e752x_edac.c
28912 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
28913 edac_mc_free(mci);
28914 }
28915
28916 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
28917 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
28918 {
28919 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28920 E7520},
28921 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
28922 index 6ffb6d2..383d8d7 100644
28923 --- a/drivers/edac/e7xxx_edac.c
28924 +++ b/drivers/edac/e7xxx_edac.c
28925 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
28926 edac_mc_free(mci);
28927 }
28928
28929 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
28930 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
28931 {
28932 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
28933 E7205},
28934 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
28935 index 495198a..ac08c85 100644
28936 --- a/drivers/edac/edac_pci_sysfs.c
28937 +++ b/drivers/edac/edac_pci_sysfs.c
28938 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
28939 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28940 static int edac_pci_poll_msec = 1000; /* one second workq period */
28941
28942 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
28943 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28944 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28945 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28946
28947 static struct kobject *edac_pci_top_main_kobj;
28948 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28949 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28950 edac_printk(KERN_CRIT, EDAC_PCI,
28951 "Signaled System Error on %s\n",
28952 pci_name(dev));
28953 - atomic_inc(&pci_nonparity_count);
28954 + atomic_inc_unchecked(&pci_nonparity_count);
28955 }
28956
28957 if (status & (PCI_STATUS_PARITY)) {
28958 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28959 "Master Data Parity Error on %s\n",
28960 pci_name(dev));
28961
28962 - atomic_inc(&pci_parity_count);
28963 + atomic_inc_unchecked(&pci_parity_count);
28964 }
28965
28966 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28967 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28968 "Detected Parity Error on %s\n",
28969 pci_name(dev));
28970
28971 - atomic_inc(&pci_parity_count);
28972 + atomic_inc_unchecked(&pci_parity_count);
28973 }
28974 }
28975
28976 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28977 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28978 "Signaled System Error on %s\n",
28979 pci_name(dev));
28980 - atomic_inc(&pci_nonparity_count);
28981 + atomic_inc_unchecked(&pci_nonparity_count);
28982 }
28983
28984 if (status & (PCI_STATUS_PARITY)) {
28985 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28986 "Master Data Parity Error on "
28987 "%s\n", pci_name(dev));
28988
28989 - atomic_inc(&pci_parity_count);
28990 + atomic_inc_unchecked(&pci_parity_count);
28991 }
28992
28993 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28994 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
28995 "Detected Parity Error on %s\n",
28996 pci_name(dev));
28997
28998 - atomic_inc(&pci_parity_count);
28999 + atomic_inc_unchecked(&pci_parity_count);
29000 }
29001 }
29002 }
29003 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29004 if (!check_pci_errors)
29005 return;
29006
29007 - before_count = atomic_read(&pci_parity_count);
29008 + before_count = atomic_read_unchecked(&pci_parity_count);
29009
29010 /* scan all PCI devices looking for a Parity Error on devices and
29011 * bridges.
29012 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29013 /* Only if operator has selected panic on PCI Error */
29014 if (edac_pci_get_panic_on_pe()) {
29015 /* If the count is different 'after' from 'before' */
29016 - if (before_count != atomic_read(&pci_parity_count))
29017 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29018 panic("EDAC: PCI Parity Error");
29019 }
29020 }
29021 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29022 index c0510b3..6e2a954 100644
29023 --- a/drivers/edac/i3000_edac.c
29024 +++ b/drivers/edac/i3000_edac.c
29025 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29026 edac_mc_free(mci);
29027 }
29028
29029 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29030 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29031 {
29032 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29033 I3000},
29034 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29035 index aa08497..7e6822a 100644
29036 --- a/drivers/edac/i3200_edac.c
29037 +++ b/drivers/edac/i3200_edac.c
29038 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29039 edac_mc_free(mci);
29040 }
29041
29042 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29043 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29044 {
29045 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29046 I3200},
29047 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29048 index 4dc3ac2..67d05a6 100644
29049 --- a/drivers/edac/i5000_edac.c
29050 +++ b/drivers/edac/i5000_edac.c
29051 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29052 *
29053 * The "E500P" device is the first device supported.
29054 */
29055 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29056 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29057 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29058 .driver_data = I5000P},
29059
29060 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29061 index bcbdeec..9886d16 100644
29062 --- a/drivers/edac/i5100_edac.c
29063 +++ b/drivers/edac/i5100_edac.c
29064 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29065 edac_mc_free(mci);
29066 }
29067
29068 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
29069 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
29070 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
29071 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
29072 { 0, }
29073 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
29074 index 74d6ec34..baff517 100644
29075 --- a/drivers/edac/i5400_edac.c
29076 +++ b/drivers/edac/i5400_edac.c
29077 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
29078 *
29079 * The "E500P" device is the first device supported.
29080 */
29081 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
29082 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
29083 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
29084 {0,} /* 0 terminated list. */
29085 };
29086 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
29087 index 6104dba..e7ea8e1 100644
29088 --- a/drivers/edac/i7300_edac.c
29089 +++ b/drivers/edac/i7300_edac.c
29090 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
29091 *
29092 * Has only 8086:360c PCI ID
29093 */
29094 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
29095 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
29096 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
29097 {0,} /* 0 terminated list. */
29098 };
29099 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
29100 index 70ad892..178943c 100644
29101 --- a/drivers/edac/i7core_edac.c
29102 +++ b/drivers/edac/i7core_edac.c
29103 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
29104 /*
29105 * pci_device_id table for which devices we are looking for
29106 */
29107 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
29108 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
29109 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
29110 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
29111 {0,} /* 0 terminated list. */
29112 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
29113 index 4329d39..f3022ef 100644
29114 --- a/drivers/edac/i82443bxgx_edac.c
29115 +++ b/drivers/edac/i82443bxgx_edac.c
29116 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
29117
29118 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
29119
29120 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
29121 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
29122 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
29123 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
29124 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
29125 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
29126 index 931a057..fd28340 100644
29127 --- a/drivers/edac/i82860_edac.c
29128 +++ b/drivers/edac/i82860_edac.c
29129 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
29130 edac_mc_free(mci);
29131 }
29132
29133 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
29134 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
29135 {
29136 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29137 I82860},
29138 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
29139 index 33864c6..01edc61 100644
29140 --- a/drivers/edac/i82875p_edac.c
29141 +++ b/drivers/edac/i82875p_edac.c
29142 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
29143 edac_mc_free(mci);
29144 }
29145
29146 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
29147 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
29148 {
29149 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29150 I82875P},
29151 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
29152 index a5da732..983363b 100644
29153 --- a/drivers/edac/i82975x_edac.c
29154 +++ b/drivers/edac/i82975x_edac.c
29155 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
29156 edac_mc_free(mci);
29157 }
29158
29159 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
29160 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
29161 {
29162 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29163 I82975X
29164 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29165 index 0106747..0b40417 100644
29166 --- a/drivers/edac/mce_amd.h
29167 +++ b/drivers/edac/mce_amd.h
29168 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
29169 bool (*dc_mce)(u16, u8);
29170 bool (*ic_mce)(u16, u8);
29171 bool (*nb_mce)(u16, u8);
29172 -};
29173 +} __no_const;
29174
29175 void amd_report_gart_errors(bool);
29176 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29177 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
29178 index b153674..ad2ba9b 100644
29179 --- a/drivers/edac/r82600_edac.c
29180 +++ b/drivers/edac/r82600_edac.c
29181 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
29182 edac_mc_free(mci);
29183 }
29184
29185 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
29186 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
29187 {
29188 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
29189 },
29190 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
29191 index 7a402bf..af0b211 100644
29192 --- a/drivers/edac/sb_edac.c
29193 +++ b/drivers/edac/sb_edac.c
29194 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
29195 /*
29196 * pci_device_id table for which devices we are looking for
29197 */
29198 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
29199 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
29200 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
29201 {0,} /* 0 terminated list. */
29202 };
29203 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
29204 index b6f47de..c5acf3a 100644
29205 --- a/drivers/edac/x38_edac.c
29206 +++ b/drivers/edac/x38_edac.c
29207 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
29208 edac_mc_free(mci);
29209 }
29210
29211 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
29212 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
29213 {
29214 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29215 X38},
29216 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29217 index 85661b0..c784559a 100644
29218 --- a/drivers/firewire/core-card.c
29219 +++ b/drivers/firewire/core-card.c
29220 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
29221
29222 void fw_core_remove_card(struct fw_card *card)
29223 {
29224 - struct fw_card_driver dummy_driver = dummy_driver_template;
29225 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29226
29227 card->driver->update_phy_reg(card, 4,
29228 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29229 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29230 index 4799393..37bd3ab 100644
29231 --- a/drivers/firewire/core-cdev.c
29232 +++ b/drivers/firewire/core-cdev.c
29233 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
29234 int ret;
29235
29236 if ((request->channels == 0 && request->bandwidth == 0) ||
29237 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29238 - request->bandwidth < 0)
29239 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29240 return -EINVAL;
29241
29242 r = kmalloc(sizeof(*r), GFP_KERNEL);
29243 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29244 index 855ab3f..11f4bbd 100644
29245 --- a/drivers/firewire/core-transaction.c
29246 +++ b/drivers/firewire/core-transaction.c
29247 @@ -37,6 +37,7 @@
29248 #include <linux/timer.h>
29249 #include <linux/types.h>
29250 #include <linux/workqueue.h>
29251 +#include <linux/sched.h>
29252
29253 #include <asm/byteorder.h>
29254
29255 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29256 index b45be57..5fad18b 100644
29257 --- a/drivers/firewire/core.h
29258 +++ b/drivers/firewire/core.h
29259 @@ -101,6 +101,7 @@ struct fw_card_driver {
29260
29261 int (*stop_iso)(struct fw_iso_context *ctx);
29262 };
29263 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29264
29265 void fw_card_initialize(struct fw_card *card,
29266 const struct fw_card_driver *driver, struct device *device);
29267 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29268 index 153980b..4b4d046 100644
29269 --- a/drivers/firmware/dmi_scan.c
29270 +++ b/drivers/firmware/dmi_scan.c
29271 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29272 }
29273 }
29274 else {
29275 - /*
29276 - * no iounmap() for that ioremap(); it would be a no-op, but
29277 - * it's so early in setup that sucker gets confused into doing
29278 - * what it shouldn't if we actually call it.
29279 - */
29280 p = dmi_ioremap(0xF0000, 0x10000);
29281 if (p == NULL)
29282 goto error;
29283 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29284 if (buf == NULL)
29285 return -1;
29286
29287 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29288 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29289
29290 iounmap(buf);
29291 return 0;
29292 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29293 index 98723cb..10ca85b 100644
29294 --- a/drivers/gpio/gpio-vr41xx.c
29295 +++ b/drivers/gpio/gpio-vr41xx.c
29296 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29297 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29298 maskl, pendl, maskh, pendh);
29299
29300 - atomic_inc(&irq_err_count);
29301 + atomic_inc_unchecked(&irq_err_count);
29302
29303 return -EINVAL;
29304 }
29305 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
29306 index 8323fc3..5c1d755 100644
29307 --- a/drivers/gpu/drm/drm_crtc.c
29308 +++ b/drivers/gpu/drm/drm_crtc.c
29309 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
29310 */
29311 if ((out_resp->count_modes >= mode_count) && mode_count) {
29312 copied = 0;
29313 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
29314 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
29315 list_for_each_entry(mode, &connector->modes, head) {
29316 drm_crtc_convert_to_umode(&u_mode, mode);
29317 if (copy_to_user(mode_ptr + copied,
29318 @@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
29319
29320 if ((out_resp->count_props >= props_count) && props_count) {
29321 copied = 0;
29322 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
29323 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
29324 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
29325 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
29326 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
29327 if (connector->property_ids[i] != 0) {
29328 if (put_user(connector->property_ids[i],
29329 @@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
29330
29331 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
29332 copied = 0;
29333 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
29334 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
29335 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
29336 if (connector->encoder_ids[i] != 0) {
29337 if (put_user(connector->encoder_ids[i],
29338 @@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
29339 }
29340
29341 for (i = 0; i < crtc_req->count_connectors; i++) {
29342 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
29343 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
29344 if (get_user(out_id, &set_connectors_ptr[i])) {
29345 ret = -EFAULT;
29346 goto out;
29347 @@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
29348 fb = obj_to_fb(obj);
29349
29350 num_clips = r->num_clips;
29351 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
29352 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
29353
29354 if (!num_clips != !clips_ptr) {
29355 ret = -EINVAL;
29356 @@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
29357 out_resp->flags = property->flags;
29358
29359 if ((out_resp->count_values >= value_count) && value_count) {
29360 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
29361 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
29362 for (i = 0; i < value_count; i++) {
29363 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
29364 ret = -EFAULT;
29365 @@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
29366 if (property->flags & DRM_MODE_PROP_ENUM) {
29367 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
29368 copied = 0;
29369 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
29370 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
29371 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
29372
29373 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
29374 @@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
29375 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
29376 copied = 0;
29377 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
29378 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
29379 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
29380
29381 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
29382 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
29383 @@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
29384 struct drm_mode_get_blob *out_resp = data;
29385 struct drm_property_blob *blob;
29386 int ret = 0;
29387 - void *blob_ptr;
29388 + void __user *blob_ptr;
29389
29390 if (!drm_core_check_feature(dev, DRIVER_MODESET))
29391 return -EINVAL;
29392 @@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
29393 blob = obj_to_blob(obj);
29394
29395 if (out_resp->length == blob->length) {
29396 - blob_ptr = (void *)(unsigned long)out_resp->data;
29397 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
29398 if (copy_to_user(blob_ptr, blob->data, blob->length)){
29399 ret = -EFAULT;
29400 goto done;
29401 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29402 index d2619d7..bd6bd00 100644
29403 --- a/drivers/gpu/drm/drm_crtc_helper.c
29404 +++ b/drivers/gpu/drm/drm_crtc_helper.c
29405 @@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29406 struct drm_crtc *tmp;
29407 int crtc_mask = 1;
29408
29409 - WARN(!crtc, "checking null crtc?\n");
29410 + BUG_ON(!crtc);
29411
29412 dev = crtc->dev;
29413
29414 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29415 index 40c187c..5746164 100644
29416 --- a/drivers/gpu/drm/drm_drv.c
29417 +++ b/drivers/gpu/drm/drm_drv.c
29418 @@ -308,7 +308,7 @@ module_exit(drm_core_exit);
29419 /**
29420 * Copy and IOCTL return string to user space
29421 */
29422 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29423 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29424 {
29425 int len;
29426
29427 @@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
29428
29429 dev = file_priv->minor->dev;
29430 atomic_inc(&dev->ioctl_count);
29431 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29432 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29433 ++file_priv->ioctl_count;
29434
29435 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29436 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29437 index 828bf65..cdaa0e9 100644
29438 --- a/drivers/gpu/drm/drm_fops.c
29439 +++ b/drivers/gpu/drm/drm_fops.c
29440 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29441 }
29442
29443 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29444 - atomic_set(&dev->counts[i], 0);
29445 + atomic_set_unchecked(&dev->counts[i], 0);
29446
29447 dev->sigdata.lock = NULL;
29448
29449 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
29450
29451 retcode = drm_open_helper(inode, filp, dev);
29452 if (!retcode) {
29453 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29454 - if (!dev->open_count++)
29455 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29456 + if (local_inc_return(&dev->open_count) == 1)
29457 retcode = drm_setup(dev);
29458 }
29459 if (!retcode) {
29460 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
29461
29462 mutex_lock(&drm_global_mutex);
29463
29464 - DRM_DEBUG("open_count = %d\n", dev->open_count);
29465 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29466
29467 if (dev->driver->preclose)
29468 dev->driver->preclose(dev, file_priv);
29469 @@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
29470 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29471 task_pid_nr(current),
29472 (long)old_encode_dev(file_priv->minor->device),
29473 - dev->open_count);
29474 + local_read(&dev->open_count));
29475
29476 /* Release any auth tokens that might point to this file_priv,
29477 (do that under the drm_global_mutex) */
29478 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
29479 * End inline drm_release
29480 */
29481
29482 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29483 - if (!--dev->open_count) {
29484 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29485 + if (local_dec_and_test(&dev->open_count)) {
29486 if (atomic_read(&dev->ioctl_count)) {
29487 DRM_ERROR("Device busy: %d\n",
29488 atomic_read(&dev->ioctl_count));
29489 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
29490 index c87dc96..326055d 100644
29491 --- a/drivers/gpu/drm/drm_global.c
29492 +++ b/drivers/gpu/drm/drm_global.c
29493 @@ -36,7 +36,7 @@
29494 struct drm_global_item {
29495 struct mutex mutex;
29496 void *object;
29497 - int refcount;
29498 + atomic_t refcount;
29499 };
29500
29501 static struct drm_global_item glob[DRM_GLOBAL_NUM];
29502 @@ -49,7 +49,7 @@ void drm_global_init(void)
29503 struct drm_global_item *item = &glob[i];
29504 mutex_init(&item->mutex);
29505 item->object = NULL;
29506 - item->refcount = 0;
29507 + atomic_set(&item->refcount, 0);
29508 }
29509 }
29510
29511 @@ -59,7 +59,7 @@ void drm_global_release(void)
29512 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
29513 struct drm_global_item *item = &glob[i];
29514 BUG_ON(item->object != NULL);
29515 - BUG_ON(item->refcount != 0);
29516 + BUG_ON(atomic_read(&item->refcount) != 0);
29517 }
29518 }
29519
29520 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29521 void *object;
29522
29523 mutex_lock(&item->mutex);
29524 - if (item->refcount == 0) {
29525 + if (atomic_read(&item->refcount) == 0) {
29526 item->object = kzalloc(ref->size, GFP_KERNEL);
29527 if (unlikely(item->object == NULL)) {
29528 ret = -ENOMEM;
29529 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
29530 goto out_err;
29531
29532 }
29533 - ++item->refcount;
29534 + atomic_inc(&item->refcount);
29535 ref->object = item->object;
29536 object = item->object;
29537 mutex_unlock(&item->mutex);
29538 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
29539 struct drm_global_item *item = &glob[ref->global_type];
29540
29541 mutex_lock(&item->mutex);
29542 - BUG_ON(item->refcount == 0);
29543 + BUG_ON(atomic_read(&item->refcount) == 0);
29544 BUG_ON(ref->object != item->object);
29545 - if (--item->refcount == 0) {
29546 + if (atomic_dec_and_test(&item->refcount)) {
29547 ref->release(ref);
29548 item->object = NULL;
29549 }
29550 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
29551 index ab1162d..42587b2 100644
29552 --- a/drivers/gpu/drm/drm_info.c
29553 +++ b/drivers/gpu/drm/drm_info.c
29554 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
29555 struct drm_local_map *map;
29556 struct drm_map_list *r_list;
29557
29558 - /* Hardcoded from _DRM_FRAME_BUFFER,
29559 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29560 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29561 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29562 + static const char * const types[] = {
29563 + [_DRM_FRAME_BUFFER] = "FB",
29564 + [_DRM_REGISTERS] = "REG",
29565 + [_DRM_SHM] = "SHM",
29566 + [_DRM_AGP] = "AGP",
29567 + [_DRM_SCATTER_GATHER] = "SG",
29568 + [_DRM_CONSISTENT] = "PCI",
29569 + [_DRM_GEM] = "GEM" };
29570 const char *type;
29571 int i;
29572
29573 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
29574 map = r_list->map;
29575 if (!map)
29576 continue;
29577 - if (map->type < 0 || map->type > 5)
29578 + if (map->type >= ARRAY_SIZE(types))
29579 type = "??";
29580 else
29581 type = types[map->type];
29582 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
29583 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29584 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29585 vma->vm_flags & VM_IO ? 'i' : '-',
29586 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29587 + 0);
29588 +#else
29589 vma->vm_pgoff);
29590 +#endif
29591
29592 #if defined(__i386__)
29593 pgprot = pgprot_val(vma->vm_page_prot);
29594 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
29595 index ddd70db..40321e6 100644
29596 --- a/drivers/gpu/drm/drm_ioc32.c
29597 +++ b/drivers/gpu/drm/drm_ioc32.c
29598 @@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
29599 request = compat_alloc_user_space(nbytes);
29600 if (!access_ok(VERIFY_WRITE, request, nbytes))
29601 return -EFAULT;
29602 - list = (struct drm_buf_desc *) (request + 1);
29603 + list = (struct drm_buf_desc __user *) (request + 1);
29604
29605 if (__put_user(count, &request->count)
29606 || __put_user(list, &request->list))
29607 @@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
29608 request = compat_alloc_user_space(nbytes);
29609 if (!access_ok(VERIFY_WRITE, request, nbytes))
29610 return -EFAULT;
29611 - list = (struct drm_buf_pub *) (request + 1);
29612 + list = (struct drm_buf_pub __user *) (request + 1);
29613
29614 if (__put_user(count, &request->count)
29615 || __put_user(list, &request->list))
29616 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
29617 index 904d7e9..ab88581 100644
29618 --- a/drivers/gpu/drm/drm_ioctl.c
29619 +++ b/drivers/gpu/drm/drm_ioctl.c
29620 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
29621 stats->data[i].value =
29622 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29623 else
29624 - stats->data[i].value = atomic_read(&dev->counts[i]);
29625 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29626 stats->data[i].type = dev->types[i];
29627 }
29628
29629 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
29630 index 632ae24..244cf4a 100644
29631 --- a/drivers/gpu/drm/drm_lock.c
29632 +++ b/drivers/gpu/drm/drm_lock.c
29633 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29634 if (drm_lock_take(&master->lock, lock->context)) {
29635 master->lock.file_priv = file_priv;
29636 master->lock.lock_time = jiffies;
29637 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29638 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29639 break; /* Got lock */
29640 }
29641
29642 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
29643 return -EINVAL;
29644 }
29645
29646 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29647 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29648
29649 if (drm_lock_free(&master->lock, lock->context)) {
29650 /* FIXME: Should really bail out here. */
29651 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
29652 index 8f371e8..9f85d52 100644
29653 --- a/drivers/gpu/drm/i810/i810_dma.c
29654 +++ b/drivers/gpu/drm/i810/i810_dma.c
29655 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
29656 dma->buflist[vertex->idx],
29657 vertex->discard, vertex->used);
29658
29659 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29660 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29661 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29662 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29663 sarea_priv->last_enqueue = dev_priv->counter - 1;
29664 sarea_priv->last_dispatch = (int)hw_status[5];
29665
29666 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
29667 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29668 mc->last_render);
29669
29670 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29671 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29672 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29673 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29674 sarea_priv->last_enqueue = dev_priv->counter - 1;
29675 sarea_priv->last_dispatch = (int)hw_status[5];
29676
29677 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
29678 index c9339f4..f5e1b9d 100644
29679 --- a/drivers/gpu/drm/i810/i810_drv.h
29680 +++ b/drivers/gpu/drm/i810/i810_drv.h
29681 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29682 int page_flipping;
29683
29684 wait_queue_head_t irq_queue;
29685 - atomic_t irq_received;
29686 - atomic_t irq_emitted;
29687 + atomic_unchecked_t irq_received;
29688 + atomic_unchecked_t irq_emitted;
29689
29690 int front_offset;
29691 } drm_i810_private_t;
29692 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
29693 index b2e3c97..58cf079 100644
29694 --- a/drivers/gpu/drm/i915/i915_debugfs.c
29695 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
29696 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
29697 I915_READ(GTIMR));
29698 }
29699 seq_printf(m, "Interrupts received: %d\n",
29700 - atomic_read(&dev_priv->irq_received));
29701 + atomic_read_unchecked(&dev_priv->irq_received));
29702 for (i = 0; i < I915_NUM_RINGS; i++) {
29703 if (IS_GEN6(dev) || IS_GEN7(dev)) {
29704 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
29705 @@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
29706 return ret;
29707
29708 if (opregion->header)
29709 - seq_write(m, opregion->header, OPREGION_SIZE);
29710 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
29711
29712 mutex_unlock(&dev->struct_mutex);
29713
29714 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
29715 index c4da951..3c59c5c 100644
29716 --- a/drivers/gpu/drm/i915/i915_dma.c
29717 +++ b/drivers/gpu/drm/i915/i915_dma.c
29718 @@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
29719 bool can_switch;
29720
29721 spin_lock(&dev->count_lock);
29722 - can_switch = (dev->open_count == 0);
29723 + can_switch = (local_read(&dev->open_count) == 0);
29724 spin_unlock(&dev->count_lock);
29725 return can_switch;
29726 }
29727 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
29728 index ae294a0..1755461 100644
29729 --- a/drivers/gpu/drm/i915/i915_drv.h
29730 +++ b/drivers/gpu/drm/i915/i915_drv.h
29731 @@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
29732 /* render clock increase/decrease */
29733 /* display clock increase/decrease */
29734 /* pll clock increase/decrease */
29735 -};
29736 +} __no_const;
29737
29738 struct intel_device_info {
29739 u8 gen;
29740 @@ -318,7 +318,7 @@ typedef struct drm_i915_private {
29741 int current_page;
29742 int page_flipping;
29743
29744 - atomic_t irq_received;
29745 + atomic_unchecked_t irq_received;
29746
29747 /* protects the irq masks */
29748 spinlock_t irq_lock;
29749 @@ -893,7 +893,7 @@ struct drm_i915_gem_object {
29750 * will be page flipped away on the next vblank. When it
29751 * reaches 0, dev_priv->pending_flip_queue will be woken up.
29752 */
29753 - atomic_t pending_flip;
29754 + atomic_unchecked_t pending_flip;
29755 };
29756
29757 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
29758 @@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
29759 extern void intel_teardown_gmbus(struct drm_device *dev);
29760 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
29761 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
29762 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29763 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
29764 {
29765 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
29766 }
29767 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29768 index b9da890..cad1d98 100644
29769 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29770 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
29771 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
29772 i915_gem_clflush_object(obj);
29773
29774 if (obj->base.pending_write_domain)
29775 - cd->flips |= atomic_read(&obj->pending_flip);
29776 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
29777
29778 /* The actual obj->write_domain will be updated with
29779 * pending_write_domain after we emit the accumulated flush for all
29780 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
29781
29782 static int
29783 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
29784 - int count)
29785 + unsigned int count)
29786 {
29787 - int i;
29788 + unsigned int i;
29789
29790 for (i = 0; i < count; i++) {
29791 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
29792 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
29793 index d47a53b..61154c2 100644
29794 --- a/drivers/gpu/drm/i915/i915_irq.c
29795 +++ b/drivers/gpu/drm/i915/i915_irq.c
29796 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
29797 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
29798 struct drm_i915_master_private *master_priv;
29799
29800 - atomic_inc(&dev_priv->irq_received);
29801 + atomic_inc_unchecked(&dev_priv->irq_received);
29802
29803 /* disable master interrupt before clearing iir */
29804 de_ier = I915_READ(DEIER);
29805 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
29806 struct drm_i915_master_private *master_priv;
29807 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
29808
29809 - atomic_inc(&dev_priv->irq_received);
29810 + atomic_inc_unchecked(&dev_priv->irq_received);
29811
29812 if (IS_GEN6(dev))
29813 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
29814 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
29815 int ret = IRQ_NONE, pipe;
29816 bool blc_event = false;
29817
29818 - atomic_inc(&dev_priv->irq_received);
29819 + atomic_inc_unchecked(&dev_priv->irq_received);
29820
29821 iir = I915_READ(IIR);
29822
29823 @@ -1750,7 +1750,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
29824 {
29825 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29826
29827 - atomic_set(&dev_priv->irq_received, 0);
29828 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29829
29830 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29831 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29832 @@ -1938,7 +1938,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
29833 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29834 int pipe;
29835
29836 - atomic_set(&dev_priv->irq_received, 0);
29837 + atomic_set_unchecked(&dev_priv->irq_received, 0);
29838
29839 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29840 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29841 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
29842 index 9ec9755..6d1cf2d 100644
29843 --- a/drivers/gpu/drm/i915/intel_display.c
29844 +++ b/drivers/gpu/drm/i915/intel_display.c
29845 @@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
29846
29847 wait_event(dev_priv->pending_flip_queue,
29848 atomic_read(&dev_priv->mm.wedged) ||
29849 - atomic_read(&obj->pending_flip) == 0);
29850 + atomic_read_unchecked(&obj->pending_flip) == 0);
29851
29852 /* Big Hammer, we also need to ensure that any pending
29853 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
29854 @@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
29855 obj = to_intel_framebuffer(crtc->fb)->obj;
29856 dev_priv = crtc->dev->dev_private;
29857 wait_event(dev_priv->pending_flip_queue,
29858 - atomic_read(&obj->pending_flip) == 0);
29859 + atomic_read_unchecked(&obj->pending_flip) == 0);
29860 }
29861
29862 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
29863 @@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
29864
29865 atomic_clear_mask(1 << intel_crtc->plane,
29866 &obj->pending_flip.counter);
29867 - if (atomic_read(&obj->pending_flip) == 0)
29868 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
29869 wake_up(&dev_priv->pending_flip_queue);
29870
29871 schedule_work(&work->work);
29872 @@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29873 /* Block clients from rendering to the new back buffer until
29874 * the flip occurs and the object is no longer visible.
29875 */
29876 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29877 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29878
29879 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
29880 if (ret)
29881 @@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
29882 return 0;
29883
29884 cleanup_pending:
29885 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29886 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
29887 drm_gem_object_unreference(&work->old_fb_obj->base);
29888 drm_gem_object_unreference(&obj->base);
29889 mutex_unlock(&dev->struct_mutex);
29890 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
29891 index 54558a0..2d97005 100644
29892 --- a/drivers/gpu/drm/mga/mga_drv.h
29893 +++ b/drivers/gpu/drm/mga/mga_drv.h
29894 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29895 u32 clear_cmd;
29896 u32 maccess;
29897
29898 - atomic_t vbl_received; /**< Number of vblanks received. */
29899 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29900 wait_queue_head_t fence_queue;
29901 - atomic_t last_fence_retired;
29902 + atomic_unchecked_t last_fence_retired;
29903 u32 next_fence_to_post;
29904
29905 unsigned int fb_cpp;
29906 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
29907 index 2581202..f230a8d9 100644
29908 --- a/drivers/gpu/drm/mga/mga_irq.c
29909 +++ b/drivers/gpu/drm/mga/mga_irq.c
29910 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
29911 if (crtc != 0)
29912 return 0;
29913
29914 - return atomic_read(&dev_priv->vbl_received);
29915 + return atomic_read_unchecked(&dev_priv->vbl_received);
29916 }
29917
29918
29919 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29920 /* VBLANK interrupt */
29921 if (status & MGA_VLINEPEN) {
29922 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29923 - atomic_inc(&dev_priv->vbl_received);
29924 + atomic_inc_unchecked(&dev_priv->vbl_received);
29925 drm_handle_vblank(dev, 0);
29926 handled = 1;
29927 }
29928 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
29929 if ((prim_start & ~0x03) != (prim_end & ~0x03))
29930 MGA_WRITE(MGA_PRIMEND, prim_end);
29931
29932 - atomic_inc(&dev_priv->last_fence_retired);
29933 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
29934 DRM_WAKEUP(&dev_priv->fence_queue);
29935 handled = 1;
29936 }
29937 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
29938 * using fences.
29939 */
29940 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29941 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29942 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29943 - *sequence) <= (1 << 23)));
29944
29945 *sequence = cur_fence;
29946 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
29947 index 5fc201b..7b032b9 100644
29948 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
29949 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
29950 @@ -201,7 +201,7 @@ struct methods {
29951 const char desc[8];
29952 void (*loadbios)(struct drm_device *, uint8_t *);
29953 const bool rw;
29954 -};
29955 +} __do_const;
29956
29957 static struct methods shadow_methods[] = {
29958 { "PRAMIN", load_vbios_pramin, true },
29959 @@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
29960 struct bit_table {
29961 const char id;
29962 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
29963 -};
29964 +} __no_const;
29965
29966 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
29967
29968 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
29969 index 4c0be3a..5757582 100644
29970 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
29971 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
29972 @@ -238,7 +238,7 @@ struct nouveau_channel {
29973 struct list_head pending;
29974 uint32_t sequence;
29975 uint32_t sequence_ack;
29976 - atomic_t last_sequence_irq;
29977 + atomic_unchecked_t last_sequence_irq;
29978 struct nouveau_vma vma;
29979 } fence;
29980
29981 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
29982 u32 handle, u16 class);
29983 void (*set_tile_region)(struct drm_device *dev, int i);
29984 void (*tlb_flush)(struct drm_device *, int engine);
29985 -};
29986 +} __no_const;
29987
29988 struct nouveau_instmem_engine {
29989 void *priv;
29990 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
29991 struct nouveau_mc_engine {
29992 int (*init)(struct drm_device *dev);
29993 void (*takedown)(struct drm_device *dev);
29994 -};
29995 +} __no_const;
29996
29997 struct nouveau_timer_engine {
29998 int (*init)(struct drm_device *dev);
29999 void (*takedown)(struct drm_device *dev);
30000 uint64_t (*read)(struct drm_device *dev);
30001 -};
30002 +} __no_const;
30003
30004 struct nouveau_fb_engine {
30005 int num_tiles;
30006 @@ -558,7 +558,7 @@ struct nouveau_vram_engine {
30007 void (*put)(struct drm_device *, struct nouveau_mem **);
30008
30009 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30010 -};
30011 +} __no_const;
30012
30013 struct nouveau_engine {
30014 struct nouveau_instmem_engine instmem;
30015 @@ -706,7 +706,7 @@ struct drm_nouveau_private {
30016 struct drm_global_reference mem_global_ref;
30017 struct ttm_bo_global_ref bo_global_ref;
30018 struct ttm_bo_device bdev;
30019 - atomic_t validate_sequence;
30020 + atomic_unchecked_t validate_sequence;
30021 } ttm;
30022
30023 struct {
30024 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30025 index 2f6daae..c9d7b9e 100644
30026 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30027 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30028 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30029 if (USE_REFCNT(dev))
30030 sequence = nvchan_rd32(chan, 0x48);
30031 else
30032 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30033 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30034
30035 if (chan->fence.sequence_ack == sequence)
30036 goto out;
30037 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30038 return ret;
30039 }
30040
30041 - atomic_set(&chan->fence.last_sequence_irq, 0);
30042 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30043 return 0;
30044 }
30045
30046 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30047 index 7ce3fde..cb3ea04 100644
30048 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30049 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30050 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30051 int trycnt = 0;
30052 int ret, i;
30053
30054 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30055 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30056 retry:
30057 if (++trycnt > 100000) {
30058 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30059 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30060 index d8831ab..0ba8356 100644
30061 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30062 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30063 @@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30064 bool can_switch;
30065
30066 spin_lock(&dev->count_lock);
30067 - can_switch = (dev->open_count == 0);
30068 + can_switch = (local_read(&dev->open_count) == 0);
30069 spin_unlock(&dev->count_lock);
30070 return can_switch;
30071 }
30072 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30073 index dbdea8e..cd6eeeb 100644
30074 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30075 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30076 @@ -554,7 +554,7 @@ static int
30077 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30078 u32 class, u32 mthd, u32 data)
30079 {
30080 - atomic_set(&chan->fence.last_sequence_irq, data);
30081 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30082 return 0;
30083 }
30084
30085 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30086 index bcac90b..53bfc76 100644
30087 --- a/drivers/gpu/drm/r128/r128_cce.c
30088 +++ b/drivers/gpu/drm/r128/r128_cce.c
30089 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30090
30091 /* GH: Simple idle check.
30092 */
30093 - atomic_set(&dev_priv->idle_count, 0);
30094 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30095
30096 /* We don't support anything other than bus-mastering ring mode,
30097 * but the ring can be in either AGP or PCI space for the ring
30098 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30099 index 930c71b..499aded 100644
30100 --- a/drivers/gpu/drm/r128/r128_drv.h
30101 +++ b/drivers/gpu/drm/r128/r128_drv.h
30102 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30103 int is_pci;
30104 unsigned long cce_buffers_offset;
30105
30106 - atomic_t idle_count;
30107 + atomic_unchecked_t idle_count;
30108
30109 int page_flipping;
30110 int current_page;
30111 u32 crtc_offset;
30112 u32 crtc_offset_cntl;
30113
30114 - atomic_t vbl_received;
30115 + atomic_unchecked_t vbl_received;
30116
30117 u32 color_fmt;
30118 unsigned int front_offset;
30119 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30120 index 429d5a0..7e899ed 100644
30121 --- a/drivers/gpu/drm/r128/r128_irq.c
30122 +++ b/drivers/gpu/drm/r128/r128_irq.c
30123 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30124 if (crtc != 0)
30125 return 0;
30126
30127 - return atomic_read(&dev_priv->vbl_received);
30128 + return atomic_read_unchecked(&dev_priv->vbl_received);
30129 }
30130
30131 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30132 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30133 /* VBLANK interrupt */
30134 if (status & R128_CRTC_VBLANK_INT) {
30135 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30136 - atomic_inc(&dev_priv->vbl_received);
30137 + atomic_inc_unchecked(&dev_priv->vbl_received);
30138 drm_handle_vblank(dev, 0);
30139 return IRQ_HANDLED;
30140 }
30141 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30142 index a9e33ce..09edd4b 100644
30143 --- a/drivers/gpu/drm/r128/r128_state.c
30144 +++ b/drivers/gpu/drm/r128/r128_state.c
30145 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30146
30147 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30148 {
30149 - if (atomic_read(&dev_priv->idle_count) == 0)
30150 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30151 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30152 else
30153 - atomic_set(&dev_priv->idle_count, 0);
30154 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30155 }
30156
30157 #endif
30158 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30159 index 5a82b6b..9e69c73 100644
30160 --- a/drivers/gpu/drm/radeon/mkregtable.c
30161 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30162 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30163 regex_t mask_rex;
30164 regmatch_t match[4];
30165 char buf[1024];
30166 - size_t end;
30167 + long end;
30168 int len;
30169 int done = 0;
30170 int r;
30171 unsigned o;
30172 struct offset *offset;
30173 char last_reg_s[10];
30174 - int last_reg;
30175 + unsigned long last_reg;
30176
30177 if (regcomp
30178 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30179 diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
30180 index cb1acff..8861bc5 100644
30181 --- a/drivers/gpu/drm/radeon/r600_cs.c
30182 +++ b/drivers/gpu/drm/radeon/r600_cs.c
30183 @@ -1304,6 +1304,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
30184 h0 = G_038004_TEX_HEIGHT(word1) + 1;
30185 d0 = G_038004_TEX_DEPTH(word1);
30186 nfaces = 1;
30187 + array = 0;
30188 switch (G_038000_DIM(word0)) {
30189 case V_038000_SQ_TEX_DIM_1D:
30190 case V_038000_SQ_TEX_DIM_2D:
30191 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30192 index 8227e76..ce0b195 100644
30193 --- a/drivers/gpu/drm/radeon/radeon.h
30194 +++ b/drivers/gpu/drm/radeon/radeon.h
30195 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
30196 */
30197 struct radeon_fence_driver {
30198 uint32_t scratch_reg;
30199 - atomic_t seq;
30200 + atomic_unchecked_t seq;
30201 uint32_t last_seq;
30202 unsigned long last_jiffies;
30203 unsigned long last_timeout;
30204 @@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
30205 int x2, int y2);
30206 void (*draw_auto)(struct radeon_device *rdev);
30207 void (*set_default_state)(struct radeon_device *rdev);
30208 -};
30209 +} __no_const;
30210
30211 struct r600_blit {
30212 struct mutex mutex;
30213 @@ -954,7 +954,7 @@ struct radeon_asic {
30214 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
30215 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30216 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30217 -};
30218 +} __no_const;
30219
30220 /*
30221 * Asic structures
30222 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30223 index 9231564..78b00fd 100644
30224 --- a/drivers/gpu/drm/radeon/radeon_device.c
30225 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30226 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30227 bool can_switch;
30228
30229 spin_lock(&dev->count_lock);
30230 - can_switch = (dev->open_count == 0);
30231 + can_switch = (local_read(&dev->open_count) == 0);
30232 spin_unlock(&dev->count_lock);
30233 return can_switch;
30234 }
30235 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30236 index a1b59ca..86f2d44 100644
30237 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30238 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30239 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30240
30241 /* SW interrupt */
30242 wait_queue_head_t swi_queue;
30243 - atomic_t swi_emitted;
30244 + atomic_unchecked_t swi_emitted;
30245 int vblank_crtc;
30246 uint32_t irq_enable_reg;
30247 uint32_t r500_disp_irq_reg;
30248 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30249 index 76ec0e9..6feb1a3 100644
30250 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30251 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30252 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30253 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
30254 return 0;
30255 }
30256 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
30257 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
30258 if (!rdev->cp.ready)
30259 /* FIXME: cp is not running assume everythings is done right
30260 * away
30261 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
30262 return r;
30263 }
30264 radeon_fence_write(rdev, 0);
30265 - atomic_set(&rdev->fence_drv.seq, 0);
30266 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
30267 INIT_LIST_HEAD(&rdev->fence_drv.created);
30268 INIT_LIST_HEAD(&rdev->fence_drv.emited);
30269 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
30270 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30271 index 48b7cea..342236f 100644
30272 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30273 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30274 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30275 request = compat_alloc_user_space(sizeof(*request));
30276 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30277 || __put_user(req32.param, &request->param)
30278 - || __put_user((void __user *)(unsigned long)req32.value,
30279 + || __put_user((unsigned long)req32.value,
30280 &request->value))
30281 return -EFAULT;
30282
30283 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30284 index 00da384..32f972d 100644
30285 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30286 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30287 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30288 unsigned int ret;
30289 RING_LOCALS;
30290
30291 - atomic_inc(&dev_priv->swi_emitted);
30292 - ret = atomic_read(&dev_priv->swi_emitted);
30293 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30294 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30295
30296 BEGIN_RING(4);
30297 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30298 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30299 drm_radeon_private_t *dev_priv =
30300 (drm_radeon_private_t *) dev->dev_private;
30301
30302 - atomic_set(&dev_priv->swi_emitted, 0);
30303 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30304 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30305
30306 dev->max_vblank_count = 0x001fffff;
30307 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30308 index e8422ae..d22d4a8 100644
30309 --- a/drivers/gpu/drm/radeon/radeon_state.c
30310 +++ b/drivers/gpu/drm/radeon/radeon_state.c
30311 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30312 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30313 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30314
30315 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30316 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30317 sarea_priv->nbox * sizeof(depth_boxes[0])))
30318 return -EFAULT;
30319
30320 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30321 {
30322 drm_radeon_private_t *dev_priv = dev->dev_private;
30323 drm_radeon_getparam_t *param = data;
30324 - int value;
30325 + int value = 0;
30326
30327 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30328
30329 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30330 index 0b5468b..9c4b308 100644
30331 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
30332 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30333 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30334 }
30335 if (unlikely(ttm_vm_ops == NULL)) {
30336 ttm_vm_ops = vma->vm_ops;
30337 - radeon_ttm_vm_ops = *ttm_vm_ops;
30338 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30339 + pax_open_kernel();
30340 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30341 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30342 + pax_close_kernel();
30343 }
30344 vma->vm_ops = &radeon_ttm_vm_ops;
30345 return 0;
30346 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30347 index a9049ed..501f284 100644
30348 --- a/drivers/gpu/drm/radeon/rs690.c
30349 +++ b/drivers/gpu/drm/radeon/rs690.c
30350 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30351 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30352 rdev->pm.sideport_bandwidth.full)
30353 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30354 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30355 + read_delay_latency.full = dfixed_const(800 * 1000);
30356 read_delay_latency.full = dfixed_div(read_delay_latency,
30357 rdev->pm.igp_sideport_mclk);
30358 + a.full = dfixed_const(370);
30359 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30360 } else {
30361 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30362 rdev->pm.k8_bandwidth.full)
30363 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30364 index 727e93d..1565650 100644
30365 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30366 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30367 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
30368 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30369 struct shrink_control *sc)
30370 {
30371 - static atomic_t start_pool = ATOMIC_INIT(0);
30372 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30373 unsigned i;
30374 - unsigned pool_offset = atomic_add_return(1, &start_pool);
30375 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30376 struct ttm_page_pool *pool;
30377 int shrink_pages = sc->nr_to_scan;
30378
30379 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30380 index 9cf87d9..2000b7d 100644
30381 --- a/drivers/gpu/drm/via/via_drv.h
30382 +++ b/drivers/gpu/drm/via/via_drv.h
30383 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30384 typedef uint32_t maskarray_t[5];
30385
30386 typedef struct drm_via_irq {
30387 - atomic_t irq_received;
30388 + atomic_unchecked_t irq_received;
30389 uint32_t pending_mask;
30390 uint32_t enable_mask;
30391 wait_queue_head_t irq_queue;
30392 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
30393 struct timeval last_vblank;
30394 int last_vblank_valid;
30395 unsigned usec_per_vblank;
30396 - atomic_t vbl_received;
30397 + atomic_unchecked_t vbl_received;
30398 drm_via_state_t hc_state;
30399 char pci_buf[VIA_PCI_BUF_SIZE];
30400 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30401 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30402 index d391f48..10c8ca3 100644
30403 --- a/drivers/gpu/drm/via/via_irq.c
30404 +++ b/drivers/gpu/drm/via/via_irq.c
30405 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30406 if (crtc != 0)
30407 return 0;
30408
30409 - return atomic_read(&dev_priv->vbl_received);
30410 + return atomic_read_unchecked(&dev_priv->vbl_received);
30411 }
30412
30413 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30414 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30415
30416 status = VIA_READ(VIA_REG_INTERRUPT);
30417 if (status & VIA_IRQ_VBLANK_PENDING) {
30418 - atomic_inc(&dev_priv->vbl_received);
30419 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30420 + atomic_inc_unchecked(&dev_priv->vbl_received);
30421 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30422 do_gettimeofday(&cur_vblank);
30423 if (dev_priv->last_vblank_valid) {
30424 dev_priv->usec_per_vblank =
30425 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30426 dev_priv->last_vblank = cur_vblank;
30427 dev_priv->last_vblank_valid = 1;
30428 }
30429 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30430 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30431 DRM_DEBUG("US per vblank is: %u\n",
30432 dev_priv->usec_per_vblank);
30433 }
30434 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30435
30436 for (i = 0; i < dev_priv->num_irqs; ++i) {
30437 if (status & cur_irq->pending_mask) {
30438 - atomic_inc(&cur_irq->irq_received);
30439 + atomic_inc_unchecked(&cur_irq->irq_received);
30440 DRM_WAKEUP(&cur_irq->irq_queue);
30441 handled = 1;
30442 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30443 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30444 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30445 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30446 masks[irq][4]));
30447 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30448 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30449 } else {
30450 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30451 (((cur_irq_sequence =
30452 - atomic_read(&cur_irq->irq_received)) -
30453 + atomic_read_unchecked(&cur_irq->irq_received)) -
30454 *sequence) <= (1 << 23)));
30455 }
30456 *sequence = cur_irq_sequence;
30457 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
30458 }
30459
30460 for (i = 0; i < dev_priv->num_irqs; ++i) {
30461 - atomic_set(&cur_irq->irq_received, 0);
30462 + atomic_set_unchecked(&cur_irq->irq_received, 0);
30463 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30464 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30465 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30466 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
30467 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30468 case VIA_IRQ_RELATIVE:
30469 irqwait->request.sequence +=
30470 - atomic_read(&cur_irq->irq_received);
30471 + atomic_read_unchecked(&cur_irq->irq_received);
30472 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30473 case VIA_IRQ_ABSOLUTE:
30474 break;
30475 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30476 index dc27970..f18b008 100644
30477 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30478 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30479 @@ -260,7 +260,7 @@ struct vmw_private {
30480 * Fencing and IRQs.
30481 */
30482
30483 - atomic_t marker_seq;
30484 + atomic_unchecked_t marker_seq;
30485 wait_queue_head_t fence_queue;
30486 wait_queue_head_t fifo_queue;
30487 int fence_queue_waiters; /* Protected by hw_mutex */
30488 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30489 index a0c2f12..68ae6cb 100644
30490 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30491 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30492 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
30493 (unsigned int) min,
30494 (unsigned int) fifo->capabilities);
30495
30496 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30497 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30498 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
30499 vmw_marker_queue_init(&fifo->marker_queue);
30500 return vmw_fifo_send_fence(dev_priv, &dummy);
30501 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
30502 if (reserveable)
30503 iowrite32(bytes, fifo_mem +
30504 SVGA_FIFO_RESERVED);
30505 - return fifo_mem + (next_cmd >> 2);
30506 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
30507 } else {
30508 need_bounce = true;
30509 }
30510 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30511
30512 fm = vmw_fifo_reserve(dev_priv, bytes);
30513 if (unlikely(fm == NULL)) {
30514 - *seqno = atomic_read(&dev_priv->marker_seq);
30515 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30516 ret = -ENOMEM;
30517 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
30518 false, 3*HZ);
30519 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30520 }
30521
30522 do {
30523 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
30524 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
30525 } while (*seqno == 0);
30526
30527 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
30528 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30529 index cabc95f..14b3d77 100644
30530 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30531 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
30532 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
30533 * emitted. Then the fence is stale and signaled.
30534 */
30535
30536 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
30537 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
30538 > VMW_FENCE_WRAP);
30539
30540 return ret;
30541 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
30542
30543 if (fifo_idle)
30544 down_read(&fifo_state->rwsem);
30545 - signal_seq = atomic_read(&dev_priv->marker_seq);
30546 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
30547 ret = 0;
30548
30549 for (;;) {
30550 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30551 index 8a8725c..afed796 100644
30552 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30553 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
30554 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
30555 while (!vmw_lag_lt(queue, us)) {
30556 spin_lock(&queue->lock);
30557 if (list_empty(&queue->head))
30558 - seqno = atomic_read(&dev_priv->marker_seq);
30559 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
30560 else {
30561 marker = list_first_entry(&queue->head,
30562 struct vmw_marker, head);
30563 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
30564 index bb656d8..4169fca 100644
30565 --- a/drivers/hid/hid-core.c
30566 +++ b/drivers/hid/hid-core.c
30567 @@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
30568
30569 int hid_add_device(struct hid_device *hdev)
30570 {
30571 - static atomic_t id = ATOMIC_INIT(0);
30572 + static atomic_unchecked_t id = ATOMIC_INIT(0);
30573 int ret;
30574
30575 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30576 @@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
30577 /* XXX hack, any other cleaner solution after the driver core
30578 * is converted to allow more than 20 bytes as the device name? */
30579 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30580 - hdev->vendor, hdev->product, atomic_inc_return(&id));
30581 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30582
30583 hid_debug_register(hdev, dev_name(&hdev->dev));
30584 ret = device_add(&hdev->dev);
30585 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
30586 index 4ef02b2..8a96831 100644
30587 --- a/drivers/hid/usbhid/hiddev.c
30588 +++ b/drivers/hid/usbhid/hiddev.c
30589 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
30590 break;
30591
30592 case HIDIOCAPPLICATION:
30593 - if (arg < 0 || arg >= hid->maxapplication)
30594 + if (arg >= hid->maxapplication)
30595 break;
30596
30597 for (i = 0; i < hid->maxcollection; i++)
30598 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
30599 index 4065374..10ed7dc 100644
30600 --- a/drivers/hv/channel.c
30601 +++ b/drivers/hv/channel.c
30602 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
30603 int ret = 0;
30604 int t;
30605
30606 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
30607 - atomic_inc(&vmbus_connection.next_gpadl_handle);
30608 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
30609 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
30610
30611 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
30612 if (ret)
30613 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
30614 index 0fb100e..baf87e5 100644
30615 --- a/drivers/hv/hv.c
30616 +++ b/drivers/hv/hv.c
30617 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
30618 u64 output_address = (output) ? virt_to_phys(output) : 0;
30619 u32 output_address_hi = output_address >> 32;
30620 u32 output_address_lo = output_address & 0xFFFFFFFF;
30621 - void *hypercall_page = hv_context.hypercall_page;
30622 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
30623
30624 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
30625 "=a"(hv_status_lo) : "d" (control_hi),
30626 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
30627 index 0aee112..b72d21f 100644
30628 --- a/drivers/hv/hyperv_vmbus.h
30629 +++ b/drivers/hv/hyperv_vmbus.h
30630 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
30631 struct vmbus_connection {
30632 enum vmbus_connect_state conn_state;
30633
30634 - atomic_t next_gpadl_handle;
30635 + atomic_unchecked_t next_gpadl_handle;
30636
30637 /*
30638 * Represents channel interrupts. Each bit position represents a
30639 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
30640 index d2d0a2a..90b8f4d 100644
30641 --- a/drivers/hv/vmbus_drv.c
30642 +++ b/drivers/hv/vmbus_drv.c
30643 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
30644 {
30645 int ret = 0;
30646
30647 - static atomic_t device_num = ATOMIC_INIT(0);
30648 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
30649
30650 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
30651 - atomic_inc_return(&device_num));
30652 + atomic_inc_return_unchecked(&device_num));
30653
30654 child_device_obj->device.bus = &hv_bus;
30655 child_device_obj->device.parent = &hv_acpi_dev->dev;
30656 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
30657 index 66f6729..2d6de0a 100644
30658 --- a/drivers/hwmon/acpi_power_meter.c
30659 +++ b/drivers/hwmon/acpi_power_meter.c
30660 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
30661 return res;
30662
30663 temp /= 1000;
30664 - if (temp < 0)
30665 - return -EINVAL;
30666
30667 mutex_lock(&resource->lock);
30668 resource->trip[attr->index - 7] = temp;
30669 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
30670 index 5357925..6cf0418 100644
30671 --- a/drivers/hwmon/sht15.c
30672 +++ b/drivers/hwmon/sht15.c
30673 @@ -166,7 +166,7 @@ struct sht15_data {
30674 int supply_uV;
30675 bool supply_uV_valid;
30676 struct work_struct update_supply_work;
30677 - atomic_t interrupt_handled;
30678 + atomic_unchecked_t interrupt_handled;
30679 };
30680
30681 /**
30682 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
30683 return ret;
30684
30685 gpio_direction_input(data->pdata->gpio_data);
30686 - atomic_set(&data->interrupt_handled, 0);
30687 + atomic_set_unchecked(&data->interrupt_handled, 0);
30688
30689 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30690 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30691 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30692 /* Only relevant if the interrupt hasn't occurred. */
30693 - if (!atomic_read(&data->interrupt_handled))
30694 + if (!atomic_read_unchecked(&data->interrupt_handled))
30695 schedule_work(&data->read_work);
30696 }
30697 ret = wait_event_timeout(data->wait_queue,
30698 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
30699
30700 /* First disable the interrupt */
30701 disable_irq_nosync(irq);
30702 - atomic_inc(&data->interrupt_handled);
30703 + atomic_inc_unchecked(&data->interrupt_handled);
30704 /* Then schedule a reading work struct */
30705 if (data->state != SHT15_READING_NOTHING)
30706 schedule_work(&data->read_work);
30707 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
30708 * If not, then start the interrupt again - care here as could
30709 * have gone low in meantime so verify it hasn't!
30710 */
30711 - atomic_set(&data->interrupt_handled, 0);
30712 + atomic_set_unchecked(&data->interrupt_handled, 0);
30713 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30714 /* If still not occurred or another handler has been scheduled */
30715 if (gpio_get_value(data->pdata->gpio_data)
30716 - || atomic_read(&data->interrupt_handled))
30717 + || atomic_read_unchecked(&data->interrupt_handled))
30718 return;
30719 }
30720
30721 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
30722 index 378fcb5..5e91fa8 100644
30723 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
30724 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
30725 @@ -43,7 +43,7 @@
30726 extern struct i2c_adapter amd756_smbus;
30727
30728 static struct i2c_adapter *s4882_adapter;
30729 -static struct i2c_algorithm *s4882_algo;
30730 +static i2c_algorithm_no_const *s4882_algo;
30731
30732 /* Wrapper access functions for multiplexed SMBus */
30733 static DEFINE_MUTEX(amd756_lock);
30734 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
30735 index 29015eb..af2d8e9 100644
30736 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
30737 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
30738 @@ -41,7 +41,7 @@
30739 extern struct i2c_adapter *nforce2_smbus;
30740
30741 static struct i2c_adapter *s4985_adapter;
30742 -static struct i2c_algorithm *s4985_algo;
30743 +static i2c_algorithm_no_const *s4985_algo;
30744
30745 /* Wrapper access functions for multiplexed SMBus */
30746 static DEFINE_MUTEX(nforce2_lock);
30747 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
30748 index d7a4833..7fae376 100644
30749 --- a/drivers/i2c/i2c-mux.c
30750 +++ b/drivers/i2c/i2c-mux.c
30751 @@ -28,7 +28,7 @@
30752 /* multiplexer per channel data */
30753 struct i2c_mux_priv {
30754 struct i2c_adapter adap;
30755 - struct i2c_algorithm algo;
30756 + i2c_algorithm_no_const algo;
30757
30758 struct i2c_adapter *parent;
30759 void *mux_dev; /* the mux chip/device */
30760 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
30761 index 57d00ca..0145194 100644
30762 --- a/drivers/ide/aec62xx.c
30763 +++ b/drivers/ide/aec62xx.c
30764 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
30765 .cable_detect = atp86x_cable_detect,
30766 };
30767
30768 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
30769 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
30770 { /* 0: AEC6210 */
30771 .name = DRV_NAME,
30772 .init_chipset = init_chipset_aec62xx,
30773 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
30774 index 2c8016a..911a27c 100644
30775 --- a/drivers/ide/alim15x3.c
30776 +++ b/drivers/ide/alim15x3.c
30777 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
30778 .dma_sff_read_status = ide_dma_sff_read_status,
30779 };
30780
30781 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
30782 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
30783 .name = DRV_NAME,
30784 .init_chipset = init_chipset_ali15x3,
30785 .init_hwif = init_hwif_ali15x3,
30786 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
30787 index 3747b25..56fc995 100644
30788 --- a/drivers/ide/amd74xx.c
30789 +++ b/drivers/ide/amd74xx.c
30790 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
30791 .udma_mask = udma, \
30792 }
30793
30794 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
30795 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
30796 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
30797 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
30798 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
30799 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
30800 index 15f0ead..cb43480 100644
30801 --- a/drivers/ide/atiixp.c
30802 +++ b/drivers/ide/atiixp.c
30803 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
30804 .cable_detect = atiixp_cable_detect,
30805 };
30806
30807 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
30808 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
30809 { /* 0: IXP200/300/400/700 */
30810 .name = DRV_NAME,
30811 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
30812 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
30813 index 5f80312..d1fc438 100644
30814 --- a/drivers/ide/cmd64x.c
30815 +++ b/drivers/ide/cmd64x.c
30816 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
30817 .dma_sff_read_status = ide_dma_sff_read_status,
30818 };
30819
30820 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
30821 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
30822 { /* 0: CMD643 */
30823 .name = DRV_NAME,
30824 .init_chipset = init_chipset_cmd64x,
30825 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
30826 index 2c1e5f7..1444762 100644
30827 --- a/drivers/ide/cs5520.c
30828 +++ b/drivers/ide/cs5520.c
30829 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
30830 .set_dma_mode = cs5520_set_dma_mode,
30831 };
30832
30833 -static const struct ide_port_info cyrix_chipset __devinitdata = {
30834 +static const struct ide_port_info cyrix_chipset __devinitconst = {
30835 .name = DRV_NAME,
30836 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
30837 .port_ops = &cs5520_port_ops,
30838 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
30839 index 4dc4eb9..49b40ad 100644
30840 --- a/drivers/ide/cs5530.c
30841 +++ b/drivers/ide/cs5530.c
30842 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
30843 .udma_filter = cs5530_udma_filter,
30844 };
30845
30846 -static const struct ide_port_info cs5530_chipset __devinitdata = {
30847 +static const struct ide_port_info cs5530_chipset __devinitconst = {
30848 .name = DRV_NAME,
30849 .init_chipset = init_chipset_cs5530,
30850 .init_hwif = init_hwif_cs5530,
30851 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
30852 index 5059faf..18d4c85 100644
30853 --- a/drivers/ide/cs5535.c
30854 +++ b/drivers/ide/cs5535.c
30855 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
30856 .cable_detect = cs5535_cable_detect,
30857 };
30858
30859 -static const struct ide_port_info cs5535_chipset __devinitdata = {
30860 +static const struct ide_port_info cs5535_chipset __devinitconst = {
30861 .name = DRV_NAME,
30862 .port_ops = &cs5535_port_ops,
30863 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
30864 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
30865 index 847553f..3ffb49d 100644
30866 --- a/drivers/ide/cy82c693.c
30867 +++ b/drivers/ide/cy82c693.c
30868 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
30869 .set_dma_mode = cy82c693_set_dma_mode,
30870 };
30871
30872 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
30873 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
30874 .name = DRV_NAME,
30875 .init_iops = init_iops_cy82c693,
30876 .port_ops = &cy82c693_port_ops,
30877 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
30878 index 58c51cd..4aec3b8 100644
30879 --- a/drivers/ide/hpt366.c
30880 +++ b/drivers/ide/hpt366.c
30881 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
30882 }
30883 };
30884
30885 -static const struct hpt_info hpt36x __devinitdata = {
30886 +static const struct hpt_info hpt36x __devinitconst = {
30887 .chip_name = "HPT36x",
30888 .chip_type = HPT36x,
30889 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
30890 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
30891 .timings = &hpt36x_timings
30892 };
30893
30894 -static const struct hpt_info hpt370 __devinitdata = {
30895 +static const struct hpt_info hpt370 __devinitconst = {
30896 .chip_name = "HPT370",
30897 .chip_type = HPT370,
30898 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30899 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
30900 .timings = &hpt37x_timings
30901 };
30902
30903 -static const struct hpt_info hpt370a __devinitdata = {
30904 +static const struct hpt_info hpt370a __devinitconst = {
30905 .chip_name = "HPT370A",
30906 .chip_type = HPT370A,
30907 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
30908 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
30909 .timings = &hpt37x_timings
30910 };
30911
30912 -static const struct hpt_info hpt374 __devinitdata = {
30913 +static const struct hpt_info hpt374 __devinitconst = {
30914 .chip_name = "HPT374",
30915 .chip_type = HPT374,
30916 .udma_mask = ATA_UDMA5,
30917 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
30918 .timings = &hpt37x_timings
30919 };
30920
30921 -static const struct hpt_info hpt372 __devinitdata = {
30922 +static const struct hpt_info hpt372 __devinitconst = {
30923 .chip_name = "HPT372",
30924 .chip_type = HPT372,
30925 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30926 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
30927 .timings = &hpt37x_timings
30928 };
30929
30930 -static const struct hpt_info hpt372a __devinitdata = {
30931 +static const struct hpt_info hpt372a __devinitconst = {
30932 .chip_name = "HPT372A",
30933 .chip_type = HPT372A,
30934 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30935 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
30936 .timings = &hpt37x_timings
30937 };
30938
30939 -static const struct hpt_info hpt302 __devinitdata = {
30940 +static const struct hpt_info hpt302 __devinitconst = {
30941 .chip_name = "HPT302",
30942 .chip_type = HPT302,
30943 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30944 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
30945 .timings = &hpt37x_timings
30946 };
30947
30948 -static const struct hpt_info hpt371 __devinitdata = {
30949 +static const struct hpt_info hpt371 __devinitconst = {
30950 .chip_name = "HPT371",
30951 .chip_type = HPT371,
30952 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30953 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
30954 .timings = &hpt37x_timings
30955 };
30956
30957 -static const struct hpt_info hpt372n __devinitdata = {
30958 +static const struct hpt_info hpt372n __devinitconst = {
30959 .chip_name = "HPT372N",
30960 .chip_type = HPT372N,
30961 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30962 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
30963 .timings = &hpt37x_timings
30964 };
30965
30966 -static const struct hpt_info hpt302n __devinitdata = {
30967 +static const struct hpt_info hpt302n __devinitconst = {
30968 .chip_name = "HPT302N",
30969 .chip_type = HPT302N,
30970 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30971 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
30972 .timings = &hpt37x_timings
30973 };
30974
30975 -static const struct hpt_info hpt371n __devinitdata = {
30976 +static const struct hpt_info hpt371n __devinitconst = {
30977 .chip_name = "HPT371N",
30978 .chip_type = HPT371N,
30979 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
30980 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
30981 .dma_sff_read_status = ide_dma_sff_read_status,
30982 };
30983
30984 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
30985 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
30986 { /* 0: HPT36x */
30987 .name = DRV_NAME,
30988 .init_chipset = init_chipset_hpt366,
30989 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
30990 index 8126824..55a2798 100644
30991 --- a/drivers/ide/ide-cd.c
30992 +++ b/drivers/ide/ide-cd.c
30993 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
30994 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30995 if ((unsigned long)buf & alignment
30996 || blk_rq_bytes(rq) & q->dma_pad_mask
30997 - || object_is_on_stack(buf))
30998 + || object_starts_on_stack(buf))
30999 drive->dma = 0;
31000 }
31001 }
31002 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31003 index a743e68..1cfd674 100644
31004 --- a/drivers/ide/ide-pci-generic.c
31005 +++ b/drivers/ide/ide-pci-generic.c
31006 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31007 .udma_mask = ATA_UDMA6, \
31008 }
31009
31010 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31011 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31012 /* 0: Unknown */
31013 DECLARE_GENERIC_PCI_DEV(0),
31014
31015 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31016 index 560e66d..d5dd180 100644
31017 --- a/drivers/ide/it8172.c
31018 +++ b/drivers/ide/it8172.c
31019 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31020 .set_dma_mode = it8172_set_dma_mode,
31021 };
31022
31023 -static const struct ide_port_info it8172_port_info __devinitdata = {
31024 +static const struct ide_port_info it8172_port_info __devinitconst = {
31025 .name = DRV_NAME,
31026 .port_ops = &it8172_port_ops,
31027 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31028 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31029 index 46816ba..1847aeb 100644
31030 --- a/drivers/ide/it8213.c
31031 +++ b/drivers/ide/it8213.c
31032 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31033 .cable_detect = it8213_cable_detect,
31034 };
31035
31036 -static const struct ide_port_info it8213_chipset __devinitdata = {
31037 +static const struct ide_port_info it8213_chipset __devinitconst = {
31038 .name = DRV_NAME,
31039 .enablebits = { {0x41, 0x80, 0x80} },
31040 .port_ops = &it8213_port_ops,
31041 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31042 index 2e3169f..c5611db 100644
31043 --- a/drivers/ide/it821x.c
31044 +++ b/drivers/ide/it821x.c
31045 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31046 .cable_detect = it821x_cable_detect,
31047 };
31048
31049 -static const struct ide_port_info it821x_chipset __devinitdata = {
31050 +static const struct ide_port_info it821x_chipset __devinitconst = {
31051 .name = DRV_NAME,
31052 .init_chipset = init_chipset_it821x,
31053 .init_hwif = init_hwif_it821x,
31054 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31055 index 74c2c4a..efddd7d 100644
31056 --- a/drivers/ide/jmicron.c
31057 +++ b/drivers/ide/jmicron.c
31058 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31059 .cable_detect = jmicron_cable_detect,
31060 };
31061
31062 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31063 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31064 .name = DRV_NAME,
31065 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31066 .port_ops = &jmicron_port_ops,
31067 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31068 index 95327a2..73f78d8 100644
31069 --- a/drivers/ide/ns87415.c
31070 +++ b/drivers/ide/ns87415.c
31071 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31072 .dma_sff_read_status = superio_dma_sff_read_status,
31073 };
31074
31075 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31076 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31077 .name = DRV_NAME,
31078 .init_hwif = init_hwif_ns87415,
31079 .tp_ops = &ns87415_tp_ops,
31080 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31081 index 1a53a4c..39edc66 100644
31082 --- a/drivers/ide/opti621.c
31083 +++ b/drivers/ide/opti621.c
31084 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31085 .set_pio_mode = opti621_set_pio_mode,
31086 };
31087
31088 -static const struct ide_port_info opti621_chipset __devinitdata = {
31089 +static const struct ide_port_info opti621_chipset __devinitconst = {
31090 .name = DRV_NAME,
31091 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31092 .port_ops = &opti621_port_ops,
31093 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31094 index 9546fe2..2e5ceb6 100644
31095 --- a/drivers/ide/pdc202xx_new.c
31096 +++ b/drivers/ide/pdc202xx_new.c
31097 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31098 .udma_mask = udma, \
31099 }
31100
31101 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31102 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31103 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31104 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31105 };
31106 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31107 index 3a35ec6..5634510 100644
31108 --- a/drivers/ide/pdc202xx_old.c
31109 +++ b/drivers/ide/pdc202xx_old.c
31110 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31111 .max_sectors = sectors, \
31112 }
31113
31114 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31115 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31116 { /* 0: PDC20246 */
31117 .name = DRV_NAME,
31118 .init_chipset = init_chipset_pdc202xx,
31119 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31120 index 1892e81..fe0fd60 100644
31121 --- a/drivers/ide/piix.c
31122 +++ b/drivers/ide/piix.c
31123 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31124 .udma_mask = udma, \
31125 }
31126
31127 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31128 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31129 /* 0: MPIIX */
31130 { /*
31131 * MPIIX actually has only a single IDE channel mapped to
31132 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31133 index a6414a8..c04173e 100644
31134 --- a/drivers/ide/rz1000.c
31135 +++ b/drivers/ide/rz1000.c
31136 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31137 }
31138 }
31139
31140 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31141 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31142 .name = DRV_NAME,
31143 .host_flags = IDE_HFLAG_NO_DMA,
31144 };
31145 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31146 index 356b9b5..d4758eb 100644
31147 --- a/drivers/ide/sc1200.c
31148 +++ b/drivers/ide/sc1200.c
31149 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31150 .dma_sff_read_status = ide_dma_sff_read_status,
31151 };
31152
31153 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31154 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31155 .name = DRV_NAME,
31156 .port_ops = &sc1200_port_ops,
31157 .dma_ops = &sc1200_dma_ops,
31158 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31159 index b7f5b0c..9701038 100644
31160 --- a/drivers/ide/scc_pata.c
31161 +++ b/drivers/ide/scc_pata.c
31162 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31163 .dma_sff_read_status = scc_dma_sff_read_status,
31164 };
31165
31166 -static const struct ide_port_info scc_chipset __devinitdata = {
31167 +static const struct ide_port_info scc_chipset __devinitconst = {
31168 .name = "sccIDE",
31169 .init_iops = init_iops_scc,
31170 .init_dma = scc_init_dma,
31171 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31172 index 35fb8da..24d72ef 100644
31173 --- a/drivers/ide/serverworks.c
31174 +++ b/drivers/ide/serverworks.c
31175 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31176 .cable_detect = svwks_cable_detect,
31177 };
31178
31179 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31180 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31181 { /* 0: OSB4 */
31182 .name = DRV_NAME,
31183 .init_chipset = init_chipset_svwks,
31184 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31185 index ddeda44..46f7e30 100644
31186 --- a/drivers/ide/siimage.c
31187 +++ b/drivers/ide/siimage.c
31188 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31189 .udma_mask = ATA_UDMA6, \
31190 }
31191
31192 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31193 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31194 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31195 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31196 };
31197 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31198 index 4a00225..09e61b4 100644
31199 --- a/drivers/ide/sis5513.c
31200 +++ b/drivers/ide/sis5513.c
31201 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31202 .cable_detect = sis_cable_detect,
31203 };
31204
31205 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31206 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31207 .name = DRV_NAME,
31208 .init_chipset = init_chipset_sis5513,
31209 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31210 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31211 index f21dc2a..d051cd2 100644
31212 --- a/drivers/ide/sl82c105.c
31213 +++ b/drivers/ide/sl82c105.c
31214 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31215 .dma_sff_read_status = ide_dma_sff_read_status,
31216 };
31217
31218 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31219 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31220 .name = DRV_NAME,
31221 .init_chipset = init_chipset_sl82c105,
31222 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31223 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31224 index 864ffe0..863a5e9 100644
31225 --- a/drivers/ide/slc90e66.c
31226 +++ b/drivers/ide/slc90e66.c
31227 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31228 .cable_detect = slc90e66_cable_detect,
31229 };
31230
31231 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31232 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31233 .name = DRV_NAME,
31234 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31235 .port_ops = &slc90e66_port_ops,
31236 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31237 index 4799d5c..1794678 100644
31238 --- a/drivers/ide/tc86c001.c
31239 +++ b/drivers/ide/tc86c001.c
31240 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31241 .dma_sff_read_status = ide_dma_sff_read_status,
31242 };
31243
31244 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31245 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31246 .name = DRV_NAME,
31247 .init_hwif = init_hwif_tc86c001,
31248 .port_ops = &tc86c001_port_ops,
31249 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31250 index 281c914..55ce1b8 100644
31251 --- a/drivers/ide/triflex.c
31252 +++ b/drivers/ide/triflex.c
31253 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31254 .set_dma_mode = triflex_set_mode,
31255 };
31256
31257 -static const struct ide_port_info triflex_device __devinitdata = {
31258 +static const struct ide_port_info triflex_device __devinitconst = {
31259 .name = DRV_NAME,
31260 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31261 .port_ops = &triflex_port_ops,
31262 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31263 index 4b42ca0..e494a98 100644
31264 --- a/drivers/ide/trm290.c
31265 +++ b/drivers/ide/trm290.c
31266 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31267 .dma_check = trm290_dma_check,
31268 };
31269
31270 -static const struct ide_port_info trm290_chipset __devinitdata = {
31271 +static const struct ide_port_info trm290_chipset __devinitconst = {
31272 .name = DRV_NAME,
31273 .init_hwif = init_hwif_trm290,
31274 .tp_ops = &trm290_tp_ops,
31275 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31276 index f46f49c..eb77678 100644
31277 --- a/drivers/ide/via82cxxx.c
31278 +++ b/drivers/ide/via82cxxx.c
31279 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31280 .cable_detect = via82cxxx_cable_detect,
31281 };
31282
31283 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31284 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31285 .name = DRV_NAME,
31286 .init_chipset = init_chipset_via82cxxx,
31287 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31288 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31289 index eb0e2cc..14241c7 100644
31290 --- a/drivers/ieee802154/fakehard.c
31291 +++ b/drivers/ieee802154/fakehard.c
31292 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31293 phy->transmit_power = 0xbf;
31294
31295 dev->netdev_ops = &fake_ops;
31296 - dev->ml_priv = &fake_mlme;
31297 + dev->ml_priv = (void *)&fake_mlme;
31298
31299 priv = netdev_priv(dev);
31300 priv->phy = phy;
31301 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31302 index 8b72f39..55df4c8 100644
31303 --- a/drivers/infiniband/core/cm.c
31304 +++ b/drivers/infiniband/core/cm.c
31305 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31306
31307 struct cm_counter_group {
31308 struct kobject obj;
31309 - atomic_long_t counter[CM_ATTR_COUNT];
31310 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31311 };
31312
31313 struct cm_counter_attribute {
31314 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31315 struct ib_mad_send_buf *msg = NULL;
31316 int ret;
31317
31318 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31319 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31320 counter[CM_REQ_COUNTER]);
31321
31322 /* Quick state check to discard duplicate REQs. */
31323 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31324 if (!cm_id_priv)
31325 return;
31326
31327 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31328 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31329 counter[CM_REP_COUNTER]);
31330 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31331 if (ret)
31332 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31333 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31334 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31335 spin_unlock_irq(&cm_id_priv->lock);
31336 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31337 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31338 counter[CM_RTU_COUNTER]);
31339 goto out;
31340 }
31341 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31342 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31343 dreq_msg->local_comm_id);
31344 if (!cm_id_priv) {
31345 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31346 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31347 counter[CM_DREQ_COUNTER]);
31348 cm_issue_drep(work->port, work->mad_recv_wc);
31349 return -EINVAL;
31350 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31351 case IB_CM_MRA_REP_RCVD:
31352 break;
31353 case IB_CM_TIMEWAIT:
31354 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31355 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31356 counter[CM_DREQ_COUNTER]);
31357 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31358 goto unlock;
31359 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31360 cm_free_msg(msg);
31361 goto deref;
31362 case IB_CM_DREQ_RCVD:
31363 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31364 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31365 counter[CM_DREQ_COUNTER]);
31366 goto unlock;
31367 default:
31368 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31369 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31370 cm_id_priv->msg, timeout)) {
31371 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31372 - atomic_long_inc(&work->port->
31373 + atomic_long_inc_unchecked(&work->port->
31374 counter_group[CM_RECV_DUPLICATES].
31375 counter[CM_MRA_COUNTER]);
31376 goto out;
31377 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31378 break;
31379 case IB_CM_MRA_REQ_RCVD:
31380 case IB_CM_MRA_REP_RCVD:
31381 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31382 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31383 counter[CM_MRA_COUNTER]);
31384 /* fall through */
31385 default:
31386 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31387 case IB_CM_LAP_IDLE:
31388 break;
31389 case IB_CM_MRA_LAP_SENT:
31390 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31391 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31392 counter[CM_LAP_COUNTER]);
31393 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31394 goto unlock;
31395 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31396 cm_free_msg(msg);
31397 goto deref;
31398 case IB_CM_LAP_RCVD:
31399 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31400 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31401 counter[CM_LAP_COUNTER]);
31402 goto unlock;
31403 default:
31404 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31405 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31406 if (cur_cm_id_priv) {
31407 spin_unlock_irq(&cm.lock);
31408 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31409 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31410 counter[CM_SIDR_REQ_COUNTER]);
31411 goto out; /* Duplicate message. */
31412 }
31413 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31414 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31415 msg->retries = 1;
31416
31417 - atomic_long_add(1 + msg->retries,
31418 + atomic_long_add_unchecked(1 + msg->retries,
31419 &port->counter_group[CM_XMIT].counter[attr_index]);
31420 if (msg->retries)
31421 - atomic_long_add(msg->retries,
31422 + atomic_long_add_unchecked(msg->retries,
31423 &port->counter_group[CM_XMIT_RETRIES].
31424 counter[attr_index]);
31425
31426 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31427 }
31428
31429 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31430 - atomic_long_inc(&port->counter_group[CM_RECV].
31431 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31432 counter[attr_id - CM_ATTR_ID_OFFSET]);
31433
31434 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31435 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31436 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31437
31438 return sprintf(buf, "%ld\n",
31439 - atomic_long_read(&group->counter[cm_attr->index]));
31440 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31441 }
31442
31443 static const struct sysfs_ops cm_counter_ops = {
31444 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
31445 index 176c8f9..2627b62 100644
31446 --- a/drivers/infiniband/core/fmr_pool.c
31447 +++ b/drivers/infiniband/core/fmr_pool.c
31448 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
31449
31450 struct task_struct *thread;
31451
31452 - atomic_t req_ser;
31453 - atomic_t flush_ser;
31454 + atomic_unchecked_t req_ser;
31455 + atomic_unchecked_t flush_ser;
31456
31457 wait_queue_head_t force_wait;
31458 };
31459 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31460 struct ib_fmr_pool *pool = pool_ptr;
31461
31462 do {
31463 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
31464 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
31465 ib_fmr_batch_release(pool);
31466
31467 - atomic_inc(&pool->flush_ser);
31468 + atomic_inc_unchecked(&pool->flush_ser);
31469 wake_up_interruptible(&pool->force_wait);
31470
31471 if (pool->flush_function)
31472 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31473 }
31474
31475 set_current_state(TASK_INTERRUPTIBLE);
31476 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
31477 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
31478 !kthread_should_stop())
31479 schedule();
31480 __set_current_state(TASK_RUNNING);
31481 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
31482 pool->dirty_watermark = params->dirty_watermark;
31483 pool->dirty_len = 0;
31484 spin_lock_init(&pool->pool_lock);
31485 - atomic_set(&pool->req_ser, 0);
31486 - atomic_set(&pool->flush_ser, 0);
31487 + atomic_set_unchecked(&pool->req_ser, 0);
31488 + atomic_set_unchecked(&pool->flush_ser, 0);
31489 init_waitqueue_head(&pool->force_wait);
31490
31491 pool->thread = kthread_run(ib_fmr_cleanup_thread,
31492 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
31493 }
31494 spin_unlock_irq(&pool->pool_lock);
31495
31496 - serial = atomic_inc_return(&pool->req_ser);
31497 + serial = atomic_inc_return_unchecked(&pool->req_ser);
31498 wake_up_process(pool->thread);
31499
31500 if (wait_event_interruptible(pool->force_wait,
31501 - atomic_read(&pool->flush_ser) - serial >= 0))
31502 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
31503 return -EINTR;
31504
31505 return 0;
31506 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
31507 } else {
31508 list_add_tail(&fmr->list, &pool->dirty_list);
31509 if (++pool->dirty_len >= pool->dirty_watermark) {
31510 - atomic_inc(&pool->req_ser);
31511 + atomic_inc_unchecked(&pool->req_ser);
31512 wake_up_process(pool->thread);
31513 }
31514 }
31515 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
31516 index 40c8353..946b0e4 100644
31517 --- a/drivers/infiniband/hw/cxgb4/mem.c
31518 +++ b/drivers/infiniband/hw/cxgb4/mem.c
31519 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31520 int err;
31521 struct fw_ri_tpte tpt;
31522 u32 stag_idx;
31523 - static atomic_t key;
31524 + static atomic_unchecked_t key;
31525
31526 if (c4iw_fatal_error(rdev))
31527 return -EIO;
31528 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
31529 &rdev->resource.tpt_fifo_lock);
31530 if (!stag_idx)
31531 return -ENOMEM;
31532 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
31533 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
31534 }
31535 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
31536 __func__, stag_state, type, pdid, stag_idx);
31537 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
31538 index 79b3dbc..96e5fcc 100644
31539 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
31540 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
31541 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31542 struct ib_atomic_eth *ateth;
31543 struct ipath_ack_entry *e;
31544 u64 vaddr;
31545 - atomic64_t *maddr;
31546 + atomic64_unchecked_t *maddr;
31547 u64 sdata;
31548 u32 rkey;
31549 u8 next;
31550 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
31551 IB_ACCESS_REMOTE_ATOMIC)))
31552 goto nack_acc_unlck;
31553 /* Perform atomic OP and save result. */
31554 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31555 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31556 sdata = be64_to_cpu(ateth->swap_data);
31557 e = &qp->s_ack_queue[qp->r_head_ack_queue];
31558 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
31559 - (u64) atomic64_add_return(sdata, maddr) - sdata :
31560 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31561 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31562 be64_to_cpu(ateth->compare_data),
31563 sdata);
31564 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
31565 index 1f95bba..9530f87 100644
31566 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
31567 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
31568 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
31569 unsigned long flags;
31570 struct ib_wc wc;
31571 u64 sdata;
31572 - atomic64_t *maddr;
31573 + atomic64_unchecked_t *maddr;
31574 enum ib_wc_status send_status;
31575
31576 /*
31577 @@ -382,11 +382,11 @@ again:
31578 IB_ACCESS_REMOTE_ATOMIC)))
31579 goto acc_err;
31580 /* Perform atomic OP and save result. */
31581 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
31582 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
31583 sdata = wqe->wr.wr.atomic.compare_add;
31584 *(u64 *) sqp->s_sge.sge.vaddr =
31585 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
31586 - (u64) atomic64_add_return(sdata, maddr) - sdata :
31587 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
31588 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
31589 sdata, wqe->wr.wr.atomic.swap);
31590 goto send_comp;
31591 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
31592 index 5965b3d..16817fb 100644
31593 --- a/drivers/infiniband/hw/nes/nes.c
31594 +++ b/drivers/infiniband/hw/nes/nes.c
31595 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
31596 LIST_HEAD(nes_adapter_list);
31597 static LIST_HEAD(nes_dev_list);
31598
31599 -atomic_t qps_destroyed;
31600 +atomic_unchecked_t qps_destroyed;
31601
31602 static unsigned int ee_flsh_adapter;
31603 static unsigned int sysfs_nonidx_addr;
31604 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
31605 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
31606 struct nes_adapter *nesadapter = nesdev->nesadapter;
31607
31608 - atomic_inc(&qps_destroyed);
31609 + atomic_inc_unchecked(&qps_destroyed);
31610
31611 /* Free the control structures */
31612
31613 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
31614 index 568b4f1..5ea3eff 100644
31615 --- a/drivers/infiniband/hw/nes/nes.h
31616 +++ b/drivers/infiniband/hw/nes/nes.h
31617 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
31618 extern unsigned int wqm_quanta;
31619 extern struct list_head nes_adapter_list;
31620
31621 -extern atomic_t cm_connects;
31622 -extern atomic_t cm_accepts;
31623 -extern atomic_t cm_disconnects;
31624 -extern atomic_t cm_closes;
31625 -extern atomic_t cm_connecteds;
31626 -extern atomic_t cm_connect_reqs;
31627 -extern atomic_t cm_rejects;
31628 -extern atomic_t mod_qp_timouts;
31629 -extern atomic_t qps_created;
31630 -extern atomic_t qps_destroyed;
31631 -extern atomic_t sw_qps_destroyed;
31632 +extern atomic_unchecked_t cm_connects;
31633 +extern atomic_unchecked_t cm_accepts;
31634 +extern atomic_unchecked_t cm_disconnects;
31635 +extern atomic_unchecked_t cm_closes;
31636 +extern atomic_unchecked_t cm_connecteds;
31637 +extern atomic_unchecked_t cm_connect_reqs;
31638 +extern atomic_unchecked_t cm_rejects;
31639 +extern atomic_unchecked_t mod_qp_timouts;
31640 +extern atomic_unchecked_t qps_created;
31641 +extern atomic_unchecked_t qps_destroyed;
31642 +extern atomic_unchecked_t sw_qps_destroyed;
31643 extern u32 mh_detected;
31644 extern u32 mh_pauses_sent;
31645 extern u32 cm_packets_sent;
31646 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
31647 extern u32 cm_packets_received;
31648 extern u32 cm_packets_dropped;
31649 extern u32 cm_packets_retrans;
31650 -extern atomic_t cm_listens_created;
31651 -extern atomic_t cm_listens_destroyed;
31652 +extern atomic_unchecked_t cm_listens_created;
31653 +extern atomic_unchecked_t cm_listens_destroyed;
31654 extern u32 cm_backlog_drops;
31655 -extern atomic_t cm_loopbacks;
31656 -extern atomic_t cm_nodes_created;
31657 -extern atomic_t cm_nodes_destroyed;
31658 -extern atomic_t cm_accel_dropped_pkts;
31659 -extern atomic_t cm_resets_recvd;
31660 -extern atomic_t pau_qps_created;
31661 -extern atomic_t pau_qps_destroyed;
31662 +extern atomic_unchecked_t cm_loopbacks;
31663 +extern atomic_unchecked_t cm_nodes_created;
31664 +extern atomic_unchecked_t cm_nodes_destroyed;
31665 +extern atomic_unchecked_t cm_accel_dropped_pkts;
31666 +extern atomic_unchecked_t cm_resets_recvd;
31667 +extern atomic_unchecked_t pau_qps_created;
31668 +extern atomic_unchecked_t pau_qps_destroyed;
31669
31670 extern u32 int_mod_timer_init;
31671 extern u32 int_mod_cq_depth_256;
31672 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
31673 index 0a52d72..0642f36 100644
31674 --- a/drivers/infiniband/hw/nes/nes_cm.c
31675 +++ b/drivers/infiniband/hw/nes/nes_cm.c
31676 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
31677 u32 cm_packets_retrans;
31678 u32 cm_packets_created;
31679 u32 cm_packets_received;
31680 -atomic_t cm_listens_created;
31681 -atomic_t cm_listens_destroyed;
31682 +atomic_unchecked_t cm_listens_created;
31683 +atomic_unchecked_t cm_listens_destroyed;
31684 u32 cm_backlog_drops;
31685 -atomic_t cm_loopbacks;
31686 -atomic_t cm_nodes_created;
31687 -atomic_t cm_nodes_destroyed;
31688 -atomic_t cm_accel_dropped_pkts;
31689 -atomic_t cm_resets_recvd;
31690 +atomic_unchecked_t cm_loopbacks;
31691 +atomic_unchecked_t cm_nodes_created;
31692 +atomic_unchecked_t cm_nodes_destroyed;
31693 +atomic_unchecked_t cm_accel_dropped_pkts;
31694 +atomic_unchecked_t cm_resets_recvd;
31695
31696 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
31697 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
31698 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
31699
31700 static struct nes_cm_core *g_cm_core;
31701
31702 -atomic_t cm_connects;
31703 -atomic_t cm_accepts;
31704 -atomic_t cm_disconnects;
31705 -atomic_t cm_closes;
31706 -atomic_t cm_connecteds;
31707 -atomic_t cm_connect_reqs;
31708 -atomic_t cm_rejects;
31709 +atomic_unchecked_t cm_connects;
31710 +atomic_unchecked_t cm_accepts;
31711 +atomic_unchecked_t cm_disconnects;
31712 +atomic_unchecked_t cm_closes;
31713 +atomic_unchecked_t cm_connecteds;
31714 +atomic_unchecked_t cm_connect_reqs;
31715 +atomic_unchecked_t cm_rejects;
31716
31717 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
31718 {
31719 @@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
31720 kfree(listener);
31721 listener = NULL;
31722 ret = 0;
31723 - atomic_inc(&cm_listens_destroyed);
31724 + atomic_inc_unchecked(&cm_listens_destroyed);
31725 } else {
31726 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
31727 }
31728 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
31729 cm_node->rem_mac);
31730
31731 add_hte_node(cm_core, cm_node);
31732 - atomic_inc(&cm_nodes_created);
31733 + atomic_inc_unchecked(&cm_nodes_created);
31734
31735 return cm_node;
31736 }
31737 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
31738 }
31739
31740 atomic_dec(&cm_core->node_cnt);
31741 - atomic_inc(&cm_nodes_destroyed);
31742 + atomic_inc_unchecked(&cm_nodes_destroyed);
31743 nesqp = cm_node->nesqp;
31744 if (nesqp) {
31745 nesqp->cm_node = NULL;
31746 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
31747
31748 static void drop_packet(struct sk_buff *skb)
31749 {
31750 - atomic_inc(&cm_accel_dropped_pkts);
31751 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31752 dev_kfree_skb_any(skb);
31753 }
31754
31755 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
31756 {
31757
31758 int reset = 0; /* whether to send reset in case of err.. */
31759 - atomic_inc(&cm_resets_recvd);
31760 + atomic_inc_unchecked(&cm_resets_recvd);
31761 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
31762 " refcnt=%d\n", cm_node, cm_node->state,
31763 atomic_read(&cm_node->ref_count));
31764 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
31765 rem_ref_cm_node(cm_node->cm_core, cm_node);
31766 return NULL;
31767 }
31768 - atomic_inc(&cm_loopbacks);
31769 + atomic_inc_unchecked(&cm_loopbacks);
31770 loopbackremotenode->loopbackpartner = cm_node;
31771 loopbackremotenode->tcp_cntxt.rcv_wscale =
31772 NES_CM_DEFAULT_RCV_WND_SCALE;
31773 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
31774 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
31775 else {
31776 rem_ref_cm_node(cm_core, cm_node);
31777 - atomic_inc(&cm_accel_dropped_pkts);
31778 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
31779 dev_kfree_skb_any(skb);
31780 }
31781 break;
31782 @@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31783
31784 if ((cm_id) && (cm_id->event_handler)) {
31785 if (issue_disconn) {
31786 - atomic_inc(&cm_disconnects);
31787 + atomic_inc_unchecked(&cm_disconnects);
31788 cm_event.event = IW_CM_EVENT_DISCONNECT;
31789 cm_event.status = disconn_status;
31790 cm_event.local_addr = cm_id->local_addr;
31791 @@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
31792 }
31793
31794 if (issue_close) {
31795 - atomic_inc(&cm_closes);
31796 + atomic_inc_unchecked(&cm_closes);
31797 nes_disconnect(nesqp, 1);
31798
31799 cm_id->provider_data = nesqp;
31800 @@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31801
31802 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31803 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31804 - atomic_inc(&cm_accepts);
31805 + atomic_inc_unchecked(&cm_accepts);
31806
31807 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31808 netdev_refcnt_read(nesvnic->netdev));
31809 @@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
31810 struct nes_cm_core *cm_core;
31811 u8 *start_buff;
31812
31813 - atomic_inc(&cm_rejects);
31814 + atomic_inc_unchecked(&cm_rejects);
31815 cm_node = (struct nes_cm_node *)cm_id->provider_data;
31816 loopback = cm_node->loopbackpartner;
31817 cm_core = cm_node->cm_core;
31818 @@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
31819 ntohl(cm_id->local_addr.sin_addr.s_addr),
31820 ntohs(cm_id->local_addr.sin_port));
31821
31822 - atomic_inc(&cm_connects);
31823 + atomic_inc_unchecked(&cm_connects);
31824 nesqp->active_conn = 1;
31825
31826 /* cache the cm_id in the qp */
31827 @@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
31828 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
31829 return err;
31830 }
31831 - atomic_inc(&cm_listens_created);
31832 + atomic_inc_unchecked(&cm_listens_created);
31833 }
31834
31835 cm_id->add_ref(cm_id);
31836 @@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
31837
31838 if (nesqp->destroyed)
31839 return;
31840 - atomic_inc(&cm_connecteds);
31841 + atomic_inc_unchecked(&cm_connecteds);
31842 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31843 " local port 0x%04X. jiffies = %lu.\n",
31844 nesqp->hwqp.qp_id,
31845 @@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
31846
31847 cm_id->add_ref(cm_id);
31848 ret = cm_id->event_handler(cm_id, &cm_event);
31849 - atomic_inc(&cm_closes);
31850 + atomic_inc_unchecked(&cm_closes);
31851 cm_event.event = IW_CM_EVENT_CLOSE;
31852 cm_event.status = 0;
31853 cm_event.provider_data = cm_id->provider_data;
31854 @@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
31855 return;
31856 cm_id = cm_node->cm_id;
31857
31858 - atomic_inc(&cm_connect_reqs);
31859 + atomic_inc_unchecked(&cm_connect_reqs);
31860 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31861 cm_node, cm_id, jiffies);
31862
31863 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
31864 return;
31865 cm_id = cm_node->cm_id;
31866
31867 - atomic_inc(&cm_connect_reqs);
31868 + atomic_inc_unchecked(&cm_connect_reqs);
31869 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31870 cm_node, cm_id, jiffies);
31871
31872 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
31873 index b3b2a24..7bfaf1e 100644
31874 --- a/drivers/infiniband/hw/nes/nes_mgt.c
31875 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
31876 @@ -40,8 +40,8 @@
31877 #include "nes.h"
31878 #include "nes_mgt.h"
31879
31880 -atomic_t pau_qps_created;
31881 -atomic_t pau_qps_destroyed;
31882 +atomic_unchecked_t pau_qps_created;
31883 +atomic_unchecked_t pau_qps_destroyed;
31884
31885 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
31886 {
31887 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
31888 {
31889 struct sk_buff *skb;
31890 unsigned long flags;
31891 - atomic_inc(&pau_qps_destroyed);
31892 + atomic_inc_unchecked(&pau_qps_destroyed);
31893
31894 /* Free packets that have not yet been forwarded */
31895 /* Lock is acquired by skb_dequeue when removing the skb */
31896 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
31897 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
31898 skb_queue_head_init(&nesqp->pau_list);
31899 spin_lock_init(&nesqp->pau_lock);
31900 - atomic_inc(&pau_qps_created);
31901 + atomic_inc_unchecked(&pau_qps_created);
31902 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
31903 }
31904
31905 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
31906 index c00d2f3..8834298 100644
31907 --- a/drivers/infiniband/hw/nes/nes_nic.c
31908 +++ b/drivers/infiniband/hw/nes/nes_nic.c
31909 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
31910 target_stat_values[++index] = mh_detected;
31911 target_stat_values[++index] = mh_pauses_sent;
31912 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31913 - target_stat_values[++index] = atomic_read(&cm_connects);
31914 - target_stat_values[++index] = atomic_read(&cm_accepts);
31915 - target_stat_values[++index] = atomic_read(&cm_disconnects);
31916 - target_stat_values[++index] = atomic_read(&cm_connecteds);
31917 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31918 - target_stat_values[++index] = atomic_read(&cm_rejects);
31919 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31920 - target_stat_values[++index] = atomic_read(&qps_created);
31921 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31922 - target_stat_values[++index] = atomic_read(&qps_destroyed);
31923 - target_stat_values[++index] = atomic_read(&cm_closes);
31924 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31925 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31926 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31927 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31928 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31929 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31930 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31931 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31932 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31933 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31934 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31935 target_stat_values[++index] = cm_packets_sent;
31936 target_stat_values[++index] = cm_packets_bounced;
31937 target_stat_values[++index] = cm_packets_created;
31938 target_stat_values[++index] = cm_packets_received;
31939 target_stat_values[++index] = cm_packets_dropped;
31940 target_stat_values[++index] = cm_packets_retrans;
31941 - target_stat_values[++index] = atomic_read(&cm_listens_created);
31942 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
31943 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
31944 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
31945 target_stat_values[++index] = cm_backlog_drops;
31946 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
31947 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
31948 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31949 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31950 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31951 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31952 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31953 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31954 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31955 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31956 target_stat_values[++index] = nesadapter->free_4kpbl;
31957 target_stat_values[++index] = nesadapter->free_256pbl;
31958 target_stat_values[++index] = int_mod_timer_init;
31959 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
31960 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
31961 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
31962 - target_stat_values[++index] = atomic_read(&pau_qps_created);
31963 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
31964 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
31965 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
31966 }
31967
31968 /**
31969 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
31970 index 5095bc4..41e8fff 100644
31971 --- a/drivers/infiniband/hw/nes/nes_verbs.c
31972 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
31973 @@ -46,9 +46,9 @@
31974
31975 #include <rdma/ib_umem.h>
31976
31977 -atomic_t mod_qp_timouts;
31978 -atomic_t qps_created;
31979 -atomic_t sw_qps_destroyed;
31980 +atomic_unchecked_t mod_qp_timouts;
31981 +atomic_unchecked_t qps_created;
31982 +atomic_unchecked_t sw_qps_destroyed;
31983
31984 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31985
31986 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
31987 if (init_attr->create_flags)
31988 return ERR_PTR(-EINVAL);
31989
31990 - atomic_inc(&qps_created);
31991 + atomic_inc_unchecked(&qps_created);
31992 switch (init_attr->qp_type) {
31993 case IB_QPT_RC:
31994 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31995 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
31996 struct iw_cm_event cm_event;
31997 int ret = 0;
31998
31999 - atomic_inc(&sw_qps_destroyed);
32000 + atomic_inc_unchecked(&sw_qps_destroyed);
32001 nesqp->destroyed = 1;
32002
32003 /* Blow away the connection if it exists. */
32004 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32005 index b881bdc..c2e360c 100644
32006 --- a/drivers/infiniband/hw/qib/qib.h
32007 +++ b/drivers/infiniband/hw/qib/qib.h
32008 @@ -51,6 +51,7 @@
32009 #include <linux/completion.h>
32010 #include <linux/kref.h>
32011 #include <linux/sched.h>
32012 +#include <linux/slab.h>
32013
32014 #include "qib_common.h"
32015 #include "qib_verbs.h"
32016 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32017 index c351aa4..e6967c2 100644
32018 --- a/drivers/input/gameport/gameport.c
32019 +++ b/drivers/input/gameport/gameport.c
32020 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32021 */
32022 static void gameport_init_port(struct gameport *gameport)
32023 {
32024 - static atomic_t gameport_no = ATOMIC_INIT(0);
32025 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32026
32027 __module_get(THIS_MODULE);
32028
32029 mutex_init(&gameport->drv_mutex);
32030 device_initialize(&gameport->dev);
32031 dev_set_name(&gameport->dev, "gameport%lu",
32032 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32033 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32034 gameport->dev.bus = &gameport_bus;
32035 gameport->dev.release = gameport_release_port;
32036 if (gameport->parent)
32037 diff --git a/drivers/input/input.c b/drivers/input/input.c
32038 index da38d97..2aa0b79 100644
32039 --- a/drivers/input/input.c
32040 +++ b/drivers/input/input.c
32041 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32042 */
32043 int input_register_device(struct input_dev *dev)
32044 {
32045 - static atomic_t input_no = ATOMIC_INIT(0);
32046 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32047 struct input_handler *handler;
32048 const char *path;
32049 int error;
32050 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32051 dev->setkeycode = input_default_setkeycode;
32052
32053 dev_set_name(&dev->dev, "input%ld",
32054 - (unsigned long) atomic_inc_return(&input_no) - 1);
32055 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32056
32057 error = device_add(&dev->dev);
32058 if (error)
32059 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32060 index b8d8611..7a4a04b 100644
32061 --- a/drivers/input/joystick/sidewinder.c
32062 +++ b/drivers/input/joystick/sidewinder.c
32063 @@ -30,6 +30,7 @@
32064 #include <linux/kernel.h>
32065 #include <linux/module.h>
32066 #include <linux/slab.h>
32067 +#include <linux/sched.h>
32068 #include <linux/init.h>
32069 #include <linux/input.h>
32070 #include <linux/gameport.h>
32071 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32072 index d728875..844c89b 100644
32073 --- a/drivers/input/joystick/xpad.c
32074 +++ b/drivers/input/joystick/xpad.c
32075 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32076
32077 static int xpad_led_probe(struct usb_xpad *xpad)
32078 {
32079 - static atomic_t led_seq = ATOMIC_INIT(0);
32080 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32081 long led_no;
32082 struct xpad_led *led;
32083 struct led_classdev *led_cdev;
32084 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32085 if (!led)
32086 return -ENOMEM;
32087
32088 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32089 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32090
32091 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32092 led->xpad = xpad;
32093 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32094 index 0110b5a..d3ad144 100644
32095 --- a/drivers/input/mousedev.c
32096 +++ b/drivers/input/mousedev.c
32097 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32098
32099 spin_unlock_irq(&client->packet_lock);
32100
32101 - if (copy_to_user(buffer, data, count))
32102 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32103 return -EFAULT;
32104
32105 return count;
32106 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32107 index ba70058..571d25d 100644
32108 --- a/drivers/input/serio/serio.c
32109 +++ b/drivers/input/serio/serio.c
32110 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
32111 */
32112 static void serio_init_port(struct serio *serio)
32113 {
32114 - static atomic_t serio_no = ATOMIC_INIT(0);
32115 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32116
32117 __module_get(THIS_MODULE);
32118
32119 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
32120 mutex_init(&serio->drv_mutex);
32121 device_initialize(&serio->dev);
32122 dev_set_name(&serio->dev, "serio%ld",
32123 - (long)atomic_inc_return(&serio_no) - 1);
32124 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32125 serio->dev.bus = &serio_bus;
32126 serio->dev.release = serio_release_port;
32127 serio->dev.groups = serio_device_attr_groups;
32128 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32129 index e44933d..9ba484a 100644
32130 --- a/drivers/isdn/capi/capi.c
32131 +++ b/drivers/isdn/capi/capi.c
32132 @@ -83,8 +83,8 @@ struct capiminor {
32133
32134 struct capi20_appl *ap;
32135 u32 ncci;
32136 - atomic_t datahandle;
32137 - atomic_t msgid;
32138 + atomic_unchecked_t datahandle;
32139 + atomic_unchecked_t msgid;
32140
32141 struct tty_port port;
32142 int ttyinstop;
32143 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32144 capimsg_setu16(s, 2, mp->ap->applid);
32145 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32146 capimsg_setu8 (s, 5, CAPI_RESP);
32147 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32148 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32149 capimsg_setu32(s, 8, mp->ncci);
32150 capimsg_setu16(s, 12, datahandle);
32151 }
32152 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32153 mp->outbytes -= len;
32154 spin_unlock_bh(&mp->outlock);
32155
32156 - datahandle = atomic_inc_return(&mp->datahandle);
32157 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32158 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32159 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32160 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32161 capimsg_setu16(skb->data, 2, mp->ap->applid);
32162 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32163 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32164 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32165 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32166 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32167 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32168 capimsg_setu16(skb->data, 16, len); /* Data length */
32169 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
32170 index db621db..825ea1a 100644
32171 --- a/drivers/isdn/gigaset/common.c
32172 +++ b/drivers/isdn/gigaset/common.c
32173 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
32174 cs->commands_pending = 0;
32175 cs->cur_at_seq = 0;
32176 cs->gotfwver = -1;
32177 - cs->open_count = 0;
32178 + local_set(&cs->open_count, 0);
32179 cs->dev = NULL;
32180 cs->tty = NULL;
32181 cs->tty_dev = NULL;
32182 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
32183 index 212efaf..f187c6b 100644
32184 --- a/drivers/isdn/gigaset/gigaset.h
32185 +++ b/drivers/isdn/gigaset/gigaset.h
32186 @@ -35,6 +35,7 @@
32187 #include <linux/tty_driver.h>
32188 #include <linux/list.h>
32189 #include <linux/atomic.h>
32190 +#include <asm/local.h>
32191
32192 #define GIG_VERSION {0, 5, 0, 0}
32193 #define GIG_COMPAT {0, 4, 0, 0}
32194 @@ -433,7 +434,7 @@ struct cardstate {
32195 spinlock_t cmdlock;
32196 unsigned curlen, cmdbytes;
32197
32198 - unsigned open_count;
32199 + local_t open_count;
32200 struct tty_struct *tty;
32201 struct tasklet_struct if_wake_tasklet;
32202 unsigned control_state;
32203 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
32204 index ee0a549..a7c9798 100644
32205 --- a/drivers/isdn/gigaset/interface.c
32206 +++ b/drivers/isdn/gigaset/interface.c
32207 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
32208 }
32209 tty->driver_data = cs;
32210
32211 - ++cs->open_count;
32212 -
32213 - if (cs->open_count == 1) {
32214 + if (local_inc_return(&cs->open_count) == 1) {
32215 spin_lock_irqsave(&cs->lock, flags);
32216 cs->tty = tty;
32217 spin_unlock_irqrestore(&cs->lock, flags);
32218 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
32219
32220 if (!cs->connected)
32221 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32222 - else if (!cs->open_count)
32223 + else if (!local_read(&cs->open_count))
32224 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32225 else {
32226 - if (!--cs->open_count) {
32227 + if (!local_dec_return(&cs->open_count)) {
32228 spin_lock_irqsave(&cs->lock, flags);
32229 cs->tty = NULL;
32230 spin_unlock_irqrestore(&cs->lock, flags);
32231 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
32232 if (!cs->connected) {
32233 gig_dbg(DEBUG_IF, "not connected");
32234 retval = -ENODEV;
32235 - } else if (!cs->open_count)
32236 + } else if (!local_read(&cs->open_count))
32237 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32238 else {
32239 retval = 0;
32240 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
32241 retval = -ENODEV;
32242 goto done;
32243 }
32244 - if (!cs->open_count) {
32245 + if (!local_read(&cs->open_count)) {
32246 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32247 retval = -ENODEV;
32248 goto done;
32249 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
32250 if (!cs->connected) {
32251 gig_dbg(DEBUG_IF, "not connected");
32252 retval = -ENODEV;
32253 - } else if (!cs->open_count)
32254 + } else if (!local_read(&cs->open_count))
32255 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32256 else if (cs->mstate != MS_LOCKED) {
32257 dev_warn(cs->dev, "can't write to unlocked device\n");
32258 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
32259
32260 if (!cs->connected)
32261 gig_dbg(DEBUG_IF, "not connected");
32262 - else if (!cs->open_count)
32263 + else if (!local_read(&cs->open_count))
32264 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32265 else if (cs->mstate != MS_LOCKED)
32266 dev_warn(cs->dev, "can't write to unlocked device\n");
32267 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
32268
32269 if (!cs->connected)
32270 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32271 - else if (!cs->open_count)
32272 + else if (!local_read(&cs->open_count))
32273 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32274 else
32275 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32276 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
32277
32278 if (!cs->connected)
32279 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32280 - else if (!cs->open_count)
32281 + else if (!local_read(&cs->open_count))
32282 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32283 else
32284 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32285 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
32286 goto out;
32287 }
32288
32289 - if (!cs->open_count) {
32290 + if (!local_read(&cs->open_count)) {
32291 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32292 goto out;
32293 }
32294 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32295 index 2a57da59..e7a12ed 100644
32296 --- a/drivers/isdn/hardware/avm/b1.c
32297 +++ b/drivers/isdn/hardware/avm/b1.c
32298 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
32299 }
32300 if (left) {
32301 if (t4file->user) {
32302 - if (copy_from_user(buf, dp, left))
32303 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32304 return -EFAULT;
32305 } else {
32306 memcpy(buf, dp, left);
32307 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
32308 }
32309 if (left) {
32310 if (config->user) {
32311 - if (copy_from_user(buf, dp, left))
32312 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32313 return -EFAULT;
32314 } else {
32315 memcpy(buf, dp, left);
32316 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32317 index 85784a7..a19ca98 100644
32318 --- a/drivers/isdn/hardware/eicon/divasync.h
32319 +++ b/drivers/isdn/hardware/eicon/divasync.h
32320 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32321 } diva_didd_add_adapter_t;
32322 typedef struct _diva_didd_remove_adapter {
32323 IDI_CALL p_request;
32324 -} diva_didd_remove_adapter_t;
32325 +} __no_const diva_didd_remove_adapter_t;
32326 typedef struct _diva_didd_read_adapter_array {
32327 void * buffer;
32328 dword length;
32329 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32330 index a3bd163..8956575 100644
32331 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32332 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32333 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32334 typedef struct _diva_os_idi_adapter_interface {
32335 diva_init_card_proc_t cleanup_adapter_proc;
32336 diva_cmd_card_proc_t cmd_proc;
32337 -} diva_os_idi_adapter_interface_t;
32338 +} __no_const diva_os_idi_adapter_interface_t;
32339
32340 typedef struct _diva_os_xdi_adapter {
32341 struct list_head link;
32342 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
32343 index 2339d73..802ab87 100644
32344 --- a/drivers/isdn/i4l/isdn_net.c
32345 +++ b/drivers/isdn/i4l/isdn_net.c
32346 @@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
32347 {
32348 isdn_net_local *lp = netdev_priv(dev);
32349 unsigned char *p;
32350 - ushort len = 0;
32351 + int len = 0;
32352
32353 switch (lp->p_encap) {
32354 case ISDN_NET_ENCAP_ETHER:
32355 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32356 index 1f355bb..43f1fea 100644
32357 --- a/drivers/isdn/icn/icn.c
32358 +++ b/drivers/isdn/icn/icn.c
32359 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
32360 if (count > len)
32361 count = len;
32362 if (user) {
32363 - if (copy_from_user(msg, buf, count))
32364 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32365 return -EFAULT;
32366 } else
32367 memcpy(msg, buf, count);
32368 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32369 index b5fdcb7..5b6c59f 100644
32370 --- a/drivers/lguest/core.c
32371 +++ b/drivers/lguest/core.c
32372 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32373 * it's worked so far. The end address needs +1 because __get_vm_area
32374 * allocates an extra guard page, so we need space for that.
32375 */
32376 +
32377 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32378 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32379 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32380 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32381 +#else
32382 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32383 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32384 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32385 +#endif
32386 +
32387 if (!switcher_vma) {
32388 err = -ENOMEM;
32389 printk("lguest: could not map switcher pages high\n");
32390 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32391 * Now the Switcher is mapped at the right address, we can't fail!
32392 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32393 */
32394 - memcpy(switcher_vma->addr, start_switcher_text,
32395 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32396 end_switcher_text - start_switcher_text);
32397
32398 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32399 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32400 index 65af42f..530c87a 100644
32401 --- a/drivers/lguest/x86/core.c
32402 +++ b/drivers/lguest/x86/core.c
32403 @@ -59,7 +59,7 @@ static struct {
32404 /* Offset from where switcher.S was compiled to where we've copied it */
32405 static unsigned long switcher_offset(void)
32406 {
32407 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32408 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32409 }
32410
32411 /* This cpu's struct lguest_pages. */
32412 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32413 * These copies are pretty cheap, so we do them unconditionally: */
32414 /* Save the current Host top-level page directory.
32415 */
32416 +
32417 +#ifdef CONFIG_PAX_PER_CPU_PGD
32418 + pages->state.host_cr3 = read_cr3();
32419 +#else
32420 pages->state.host_cr3 = __pa(current->mm->pgd);
32421 +#endif
32422 +
32423 /*
32424 * Set up the Guest's page tables to see this CPU's pages (and no
32425 * other CPU's pages).
32426 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32427 * compiled-in switcher code and the high-mapped copy we just made.
32428 */
32429 for (i = 0; i < IDT_ENTRIES; i++)
32430 - default_idt_entries[i] += switcher_offset();
32431 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32432
32433 /*
32434 * Set up the Switcher's per-cpu areas.
32435 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32436 * it will be undisturbed when we switch. To change %cs and jump we
32437 * need this structure to feed to Intel's "lcall" instruction.
32438 */
32439 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32440 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32441 lguest_entry.segment = LGUEST_CS;
32442
32443 /*
32444 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32445 index 40634b0..4f5855e 100644
32446 --- a/drivers/lguest/x86/switcher_32.S
32447 +++ b/drivers/lguest/x86/switcher_32.S
32448 @@ -87,6 +87,7 @@
32449 #include <asm/page.h>
32450 #include <asm/segment.h>
32451 #include <asm/lguest.h>
32452 +#include <asm/processor-flags.h>
32453
32454 // We mark the start of the code to copy
32455 // It's placed in .text tho it's never run here
32456 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32457 // Changes type when we load it: damn Intel!
32458 // For after we switch over our page tables
32459 // That entry will be read-only: we'd crash.
32460 +
32461 +#ifdef CONFIG_PAX_KERNEXEC
32462 + mov %cr0, %edx
32463 + xor $X86_CR0_WP, %edx
32464 + mov %edx, %cr0
32465 +#endif
32466 +
32467 movl $(GDT_ENTRY_TSS*8), %edx
32468 ltr %dx
32469
32470 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
32471 // Let's clear it again for our return.
32472 // The GDT descriptor of the Host
32473 // Points to the table after two "size" bytes
32474 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
32475 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
32476 // Clear "used" from type field (byte 5, bit 2)
32477 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
32478 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
32479 +
32480 +#ifdef CONFIG_PAX_KERNEXEC
32481 + mov %cr0, %eax
32482 + xor $X86_CR0_WP, %eax
32483 + mov %eax, %cr0
32484 +#endif
32485
32486 // Once our page table's switched, the Guest is live!
32487 // The Host fades as we run this final step.
32488 @@ -295,13 +309,12 @@ deliver_to_host:
32489 // I consulted gcc, and it gave
32490 // These instructions, which I gladly credit:
32491 leal (%edx,%ebx,8), %eax
32492 - movzwl (%eax),%edx
32493 - movl 4(%eax), %eax
32494 - xorw %ax, %ax
32495 - orl %eax, %edx
32496 + movl 4(%eax), %edx
32497 + movw (%eax), %dx
32498 // Now the address of the handler's in %edx
32499 // We call it now: its "iret" drops us home.
32500 - jmp *%edx
32501 + ljmp $__KERNEL_CS, $1f
32502 +1: jmp *%edx
32503
32504 // Every interrupt can come to us here
32505 // But we must truly tell each apart.
32506 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
32507 index 4daf9e5..b8d1d0f 100644
32508 --- a/drivers/macintosh/macio_asic.c
32509 +++ b/drivers/macintosh/macio_asic.c
32510 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
32511 * MacIO is matched against any Apple ID, it's probe() function
32512 * will then decide wether it applies or not
32513 */
32514 -static const struct pci_device_id __devinitdata pci_ids [] = { {
32515 +static const struct pci_device_id __devinitconst pci_ids [] = { {
32516 .vendor = PCI_VENDOR_ID_APPLE,
32517 .device = PCI_ANY_ID,
32518 .subvendor = PCI_ANY_ID,
32519 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
32520 index 1ce84ed..0fdd40a 100644
32521 --- a/drivers/md/dm-ioctl.c
32522 +++ b/drivers/md/dm-ioctl.c
32523 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
32524 cmd == DM_LIST_VERSIONS_CMD)
32525 return 0;
32526
32527 - if ((cmd == DM_DEV_CREATE_CMD)) {
32528 + if (cmd == DM_DEV_CREATE_CMD) {
32529 if (!*param->name) {
32530 DMWARN("name not supplied when creating device");
32531 return -EINVAL;
32532 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
32533 index 9bfd057..01180bc 100644
32534 --- a/drivers/md/dm-raid1.c
32535 +++ b/drivers/md/dm-raid1.c
32536 @@ -40,7 +40,7 @@ enum dm_raid1_error {
32537
32538 struct mirror {
32539 struct mirror_set *ms;
32540 - atomic_t error_count;
32541 + atomic_unchecked_t error_count;
32542 unsigned long error_type;
32543 struct dm_dev *dev;
32544 sector_t offset;
32545 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
32546 struct mirror *m;
32547
32548 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
32549 - if (!atomic_read(&m->error_count))
32550 + if (!atomic_read_unchecked(&m->error_count))
32551 return m;
32552
32553 return NULL;
32554 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
32555 * simple way to tell if a device has encountered
32556 * errors.
32557 */
32558 - atomic_inc(&m->error_count);
32559 + atomic_inc_unchecked(&m->error_count);
32560
32561 if (test_and_set_bit(error_type, &m->error_type))
32562 return;
32563 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
32564 struct mirror *m = get_default_mirror(ms);
32565
32566 do {
32567 - if (likely(!atomic_read(&m->error_count)))
32568 + if (likely(!atomic_read_unchecked(&m->error_count)))
32569 return m;
32570
32571 if (m-- == ms->mirror)
32572 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
32573 {
32574 struct mirror *default_mirror = get_default_mirror(m->ms);
32575
32576 - return !atomic_read(&default_mirror->error_count);
32577 + return !atomic_read_unchecked(&default_mirror->error_count);
32578 }
32579
32580 static int mirror_available(struct mirror_set *ms, struct bio *bio)
32581 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
32582 */
32583 if (likely(region_in_sync(ms, region, 1)))
32584 m = choose_mirror(ms, bio->bi_sector);
32585 - else if (m && atomic_read(&m->error_count))
32586 + else if (m && atomic_read_unchecked(&m->error_count))
32587 m = NULL;
32588
32589 if (likely(m))
32590 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
32591 }
32592
32593 ms->mirror[mirror].ms = ms;
32594 - atomic_set(&(ms->mirror[mirror].error_count), 0);
32595 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
32596 ms->mirror[mirror].error_type = 0;
32597 ms->mirror[mirror].offset = offset;
32598
32599 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
32600 */
32601 static char device_status_char(struct mirror *m)
32602 {
32603 - if (!atomic_read(&(m->error_count)))
32604 + if (!atomic_read_unchecked(&(m->error_count)))
32605 return 'A';
32606
32607 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
32608 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
32609 index 3d80cf0..b77cc47 100644
32610 --- a/drivers/md/dm-stripe.c
32611 +++ b/drivers/md/dm-stripe.c
32612 @@ -20,7 +20,7 @@ struct stripe {
32613 struct dm_dev *dev;
32614 sector_t physical_start;
32615
32616 - atomic_t error_count;
32617 + atomic_unchecked_t error_count;
32618 };
32619
32620 struct stripe_c {
32621 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
32622 kfree(sc);
32623 return r;
32624 }
32625 - atomic_set(&(sc->stripe[i].error_count), 0);
32626 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
32627 }
32628
32629 ti->private = sc;
32630 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
32631 DMEMIT("%d ", sc->stripes);
32632 for (i = 0; i < sc->stripes; i++) {
32633 DMEMIT("%s ", sc->stripe[i].dev->name);
32634 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
32635 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
32636 'D' : 'A';
32637 }
32638 buffer[i] = '\0';
32639 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
32640 */
32641 for (i = 0; i < sc->stripes; i++)
32642 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
32643 - atomic_inc(&(sc->stripe[i].error_count));
32644 - if (atomic_read(&(sc->stripe[i].error_count)) <
32645 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
32646 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32647 DM_IO_ERROR_THRESHOLD)
32648 schedule_work(&sc->trigger_event);
32649 }
32650 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
32651 index 8e91321..fd17aef 100644
32652 --- a/drivers/md/dm-table.c
32653 +++ b/drivers/md/dm-table.c
32654 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
32655 if (!dev_size)
32656 return 0;
32657
32658 - if ((start >= dev_size) || (start + len > dev_size)) {
32659 + if ((start >= dev_size) || (len > dev_size - start)) {
32660 DMWARN("%s: %s too small for target: "
32661 "start=%llu, len=%llu, dev_size=%llu",
32662 dm_device_name(ti->table->md), bdevname(bdev, b),
32663 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
32664 index 237571a..fb6d19b 100644
32665 --- a/drivers/md/dm-thin-metadata.c
32666 +++ b/drivers/md/dm-thin-metadata.c
32667 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32668
32669 pmd->info.tm = tm;
32670 pmd->info.levels = 2;
32671 - pmd->info.value_type.context = pmd->data_sm;
32672 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32673 pmd->info.value_type.size = sizeof(__le64);
32674 pmd->info.value_type.inc = data_block_inc;
32675 pmd->info.value_type.dec = data_block_dec;
32676 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
32677
32678 pmd->bl_info.tm = tm;
32679 pmd->bl_info.levels = 1;
32680 - pmd->bl_info.value_type.context = pmd->data_sm;
32681 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
32682 pmd->bl_info.value_type.size = sizeof(__le64);
32683 pmd->bl_info.value_type.inc = data_block_inc;
32684 pmd->bl_info.value_type.dec = data_block_dec;
32685 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
32686 index 4720f68..78d1df7 100644
32687 --- a/drivers/md/dm.c
32688 +++ b/drivers/md/dm.c
32689 @@ -177,9 +177,9 @@ struct mapped_device {
32690 /*
32691 * Event handling.
32692 */
32693 - atomic_t event_nr;
32694 + atomic_unchecked_t event_nr;
32695 wait_queue_head_t eventq;
32696 - atomic_t uevent_seq;
32697 + atomic_unchecked_t uevent_seq;
32698 struct list_head uevent_list;
32699 spinlock_t uevent_lock; /* Protect access to uevent_list */
32700
32701 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
32702 rwlock_init(&md->map_lock);
32703 atomic_set(&md->holders, 1);
32704 atomic_set(&md->open_count, 0);
32705 - atomic_set(&md->event_nr, 0);
32706 - atomic_set(&md->uevent_seq, 0);
32707 + atomic_set_unchecked(&md->event_nr, 0);
32708 + atomic_set_unchecked(&md->uevent_seq, 0);
32709 INIT_LIST_HEAD(&md->uevent_list);
32710 spin_lock_init(&md->uevent_lock);
32711
32712 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
32713
32714 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
32715
32716 - atomic_inc(&md->event_nr);
32717 + atomic_inc_unchecked(&md->event_nr);
32718 wake_up(&md->eventq);
32719 }
32720
32721 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
32722
32723 uint32_t dm_next_uevent_seq(struct mapped_device *md)
32724 {
32725 - return atomic_add_return(1, &md->uevent_seq);
32726 + return atomic_add_return_unchecked(1, &md->uevent_seq);
32727 }
32728
32729 uint32_t dm_get_event_nr(struct mapped_device *md)
32730 {
32731 - return atomic_read(&md->event_nr);
32732 + return atomic_read_unchecked(&md->event_nr);
32733 }
32734
32735 int dm_wait_event(struct mapped_device *md, int event_nr)
32736 {
32737 return wait_event_interruptible(md->eventq,
32738 - (event_nr != atomic_read(&md->event_nr)));
32739 + (event_nr != atomic_read_unchecked(&md->event_nr)));
32740 }
32741
32742 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
32743 diff --git a/drivers/md/md.c b/drivers/md/md.c
32744 index f47f1f8..b7f559e 100644
32745 --- a/drivers/md/md.c
32746 +++ b/drivers/md/md.c
32747 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
32748 * start build, activate spare
32749 */
32750 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32751 -static atomic_t md_event_count;
32752 +static atomic_unchecked_t md_event_count;
32753 void md_new_event(struct mddev *mddev)
32754 {
32755 - atomic_inc(&md_event_count);
32756 + atomic_inc_unchecked(&md_event_count);
32757 wake_up(&md_event_waiters);
32758 }
32759 EXPORT_SYMBOL_GPL(md_new_event);
32760 @@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32761 */
32762 static void md_new_event_inintr(struct mddev *mddev)
32763 {
32764 - atomic_inc(&md_event_count);
32765 + atomic_inc_unchecked(&md_event_count);
32766 wake_up(&md_event_waiters);
32767 }
32768
32769 @@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
32770
32771 rdev->preferred_minor = 0xffff;
32772 rdev->data_offset = le64_to_cpu(sb->data_offset);
32773 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32774 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32775
32776 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32777 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32778 @@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
32779 else
32780 sb->resync_offset = cpu_to_le64(0);
32781
32782 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32783 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32784
32785 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32786 sb->size = cpu_to_le64(mddev->dev_sectors);
32787 @@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
32788 static ssize_t
32789 errors_show(struct md_rdev *rdev, char *page)
32790 {
32791 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32792 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32793 }
32794
32795 static ssize_t
32796 @@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
32797 char *e;
32798 unsigned long n = simple_strtoul(buf, &e, 10);
32799 if (*buf && (*e == 0 || *e == '\n')) {
32800 - atomic_set(&rdev->corrected_errors, n);
32801 + atomic_set_unchecked(&rdev->corrected_errors, n);
32802 return len;
32803 }
32804 return -EINVAL;
32805 @@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
32806 rdev->sb_loaded = 0;
32807 rdev->bb_page = NULL;
32808 atomic_set(&rdev->nr_pending, 0);
32809 - atomic_set(&rdev->read_errors, 0);
32810 - atomic_set(&rdev->corrected_errors, 0);
32811 + atomic_set_unchecked(&rdev->read_errors, 0);
32812 + atomic_set_unchecked(&rdev->corrected_errors, 0);
32813
32814 INIT_LIST_HEAD(&rdev->same_set);
32815 init_waitqueue_head(&rdev->blocked_wait);
32816 @@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32817
32818 spin_unlock(&pers_lock);
32819 seq_printf(seq, "\n");
32820 - seq->poll_event = atomic_read(&md_event_count);
32821 + seq->poll_event = atomic_read_unchecked(&md_event_count);
32822 return 0;
32823 }
32824 if (v == (void*)2) {
32825 @@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
32826 chunk_kb ? "KB" : "B");
32827 if (bitmap->file) {
32828 seq_printf(seq, ", file: ");
32829 - seq_path(seq, &bitmap->file->f_path, " \t\n");
32830 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32831 }
32832
32833 seq_printf(seq, "\n");
32834 @@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
32835 return error;
32836
32837 seq = file->private_data;
32838 - seq->poll_event = atomic_read(&md_event_count);
32839 + seq->poll_event = atomic_read_unchecked(&md_event_count);
32840 return error;
32841 }
32842
32843 @@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
32844 /* always allow read */
32845 mask = POLLIN | POLLRDNORM;
32846
32847 - if (seq->poll_event != atomic_read(&md_event_count))
32848 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
32849 mask |= POLLERR | POLLPRI;
32850 return mask;
32851 }
32852 @@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
32853 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32854 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32855 (int)part_stat_read(&disk->part0, sectors[1]) -
32856 - atomic_read(&disk->sync_io);
32857 + atomic_read_unchecked(&disk->sync_io);
32858 /* sync IO will cause sync_io to increase before the disk_stats
32859 * as sync_io is counted when a request starts, and
32860 * disk_stats is counted when it completes.
32861 diff --git a/drivers/md/md.h b/drivers/md/md.h
32862 index cf742d9..7c7c745 100644
32863 --- a/drivers/md/md.h
32864 +++ b/drivers/md/md.h
32865 @@ -120,13 +120,13 @@ struct md_rdev {
32866 * only maintained for arrays that
32867 * support hot removal
32868 */
32869 - atomic_t read_errors; /* number of consecutive read errors that
32870 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
32871 * we have tried to ignore.
32872 */
32873 struct timespec last_read_error; /* monotonic time since our
32874 * last read error
32875 */
32876 - atomic_t corrected_errors; /* number of corrected read errors,
32877 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32878 * for reporting to userspace and storing
32879 * in superblock.
32880 */
32881 @@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
32882
32883 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32884 {
32885 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32886 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32887 }
32888
32889 struct md_personality
32890 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
32891 index 50ed53b..4f29d7d 100644
32892 --- a/drivers/md/persistent-data/dm-space-map-checker.c
32893 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
32894 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
32895 /*----------------------------------------------------------------*/
32896
32897 struct sm_checker {
32898 - struct dm_space_map sm;
32899 + dm_space_map_no_const sm;
32900
32901 struct count_array old_counts;
32902 struct count_array counts;
32903 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
32904 index fc469ba..2d91555 100644
32905 --- a/drivers/md/persistent-data/dm-space-map-disk.c
32906 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
32907 @@ -23,7 +23,7 @@
32908 * Space map interface.
32909 */
32910 struct sm_disk {
32911 - struct dm_space_map sm;
32912 + dm_space_map_no_const sm;
32913
32914 struct ll_disk ll;
32915 struct ll_disk old_ll;
32916 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
32917 index e89ae5e..062e4c2 100644
32918 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
32919 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
32920 @@ -43,7 +43,7 @@ struct block_op {
32921 };
32922
32923 struct sm_metadata {
32924 - struct dm_space_map sm;
32925 + dm_space_map_no_const sm;
32926
32927 struct ll_disk ll;
32928 struct ll_disk old_ll;
32929 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
32930 index 1cbfc6b..56e1dbb 100644
32931 --- a/drivers/md/persistent-data/dm-space-map.h
32932 +++ b/drivers/md/persistent-data/dm-space-map.h
32933 @@ -60,6 +60,7 @@ struct dm_space_map {
32934 int (*root_size)(struct dm_space_map *sm, size_t *result);
32935 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
32936 };
32937 +typedef struct dm_space_map __no_const dm_space_map_no_const;
32938
32939 /*----------------------------------------------------------------*/
32940
32941 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
32942 index 7d9e071..015b1d5 100644
32943 --- a/drivers/md/raid1.c
32944 +++ b/drivers/md/raid1.c
32945 @@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
32946 if (r1_sync_page_io(rdev, sect, s,
32947 bio->bi_io_vec[idx].bv_page,
32948 READ) != 0)
32949 - atomic_add(s, &rdev->corrected_errors);
32950 + atomic_add_unchecked(s, &rdev->corrected_errors);
32951 }
32952 sectors -= s;
32953 sect += s;
32954 @@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
32955 test_bit(In_sync, &rdev->flags)) {
32956 if (r1_sync_page_io(rdev, sect, s,
32957 conf->tmppage, READ)) {
32958 - atomic_add(s, &rdev->corrected_errors);
32959 + atomic_add_unchecked(s, &rdev->corrected_errors);
32960 printk(KERN_INFO
32961 "md/raid1:%s: read error corrected "
32962 "(%d sectors at %llu on %s)\n",
32963 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
32964 index 685ddf3..955b087 100644
32965 --- a/drivers/md/raid10.c
32966 +++ b/drivers/md/raid10.c
32967 @@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
32968 /* The write handler will notice the lack of
32969 * R10BIO_Uptodate and record any errors etc
32970 */
32971 - atomic_add(r10_bio->sectors,
32972 + atomic_add_unchecked(r10_bio->sectors,
32973 &conf->mirrors[d].rdev->corrected_errors);
32974
32975 /* for reconstruct, we always reschedule after a read.
32976 @@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32977 {
32978 struct timespec cur_time_mon;
32979 unsigned long hours_since_last;
32980 - unsigned int read_errors = atomic_read(&rdev->read_errors);
32981 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
32982
32983 ktime_get_ts(&cur_time_mon);
32984
32985 @@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
32986 * overflowing the shift of read_errors by hours_since_last.
32987 */
32988 if (hours_since_last >= 8 * sizeof(read_errors))
32989 - atomic_set(&rdev->read_errors, 0);
32990 + atomic_set_unchecked(&rdev->read_errors, 0);
32991 else
32992 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
32993 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
32994 }
32995
32996 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
32997 @@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
32998 return;
32999
33000 check_decay_read_errors(mddev, rdev);
33001 - atomic_inc(&rdev->read_errors);
33002 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33003 + atomic_inc_unchecked(&rdev->read_errors);
33004 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33005 char b[BDEVNAME_SIZE];
33006 bdevname(rdev->bdev, b);
33007
33008 @@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33009 "md/raid10:%s: %s: Raid device exceeded "
33010 "read_error threshold [cur %d:max %d]\n",
33011 mdname(mddev), b,
33012 - atomic_read(&rdev->read_errors), max_read_errors);
33013 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33014 printk(KERN_NOTICE
33015 "md/raid10:%s: %s: Failing raid device\n",
33016 mdname(mddev), b);
33017 @@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33018 (unsigned long long)(
33019 sect + rdev->data_offset),
33020 bdevname(rdev->bdev, b));
33021 - atomic_add(s, &rdev->corrected_errors);
33022 + atomic_add_unchecked(s, &rdev->corrected_errors);
33023 }
33024
33025 rdev_dec_pending(rdev, mddev);
33026 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33027 index 858fdbb..b2dac95 100644
33028 --- a/drivers/md/raid5.c
33029 +++ b/drivers/md/raid5.c
33030 @@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
33031 (unsigned long long)(sh->sector
33032 + rdev->data_offset),
33033 bdevname(rdev->bdev, b));
33034 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33035 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33036 clear_bit(R5_ReadError, &sh->dev[i].flags);
33037 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33038 }
33039 - if (atomic_read(&conf->disks[i].rdev->read_errors))
33040 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
33041 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
33042 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
33043 } else {
33044 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
33045 int retry = 0;
33046 rdev = conf->disks[i].rdev;
33047
33048 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33049 - atomic_inc(&rdev->read_errors);
33050 + atomic_inc_unchecked(&rdev->read_errors);
33051 if (conf->mddev->degraded >= conf->max_degraded)
33052 printk_ratelimited(
33053 KERN_WARNING
33054 @@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33055 (unsigned long long)(sh->sector
33056 + rdev->data_offset),
33057 bdn);
33058 - else if (atomic_read(&rdev->read_errors)
33059 + else if (atomic_read_unchecked(&rdev->read_errors)
33060 > conf->max_nr_stripes)
33061 printk(KERN_WARNING
33062 "md/raid:%s: Too many read errors, failing device %s.\n",
33063 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33064 index ba9a643..e474ab5 100644
33065 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33066 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33067 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33068 .subvendor = _subvend, .subdevice = _subdev, \
33069 .driver_data = (unsigned long)&_driverdata }
33070
33071 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33072 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33073 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33074 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33075 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33076 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33077 index a7d876f..8c21b61 100644
33078 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33079 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33080 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33081 union {
33082 dmx_ts_cb ts;
33083 dmx_section_cb sec;
33084 - } cb;
33085 + } __no_const cb;
33086
33087 struct dvb_demux *demux;
33088 void *priv;
33089 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33090 index f732877..d38c35a 100644
33091 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33092 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33093 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33094 const struct dvb_device *template, void *priv, int type)
33095 {
33096 struct dvb_device *dvbdev;
33097 - struct file_operations *dvbdevfops;
33098 + file_operations_no_const *dvbdevfops;
33099 struct device *clsdev;
33100 int minor;
33101 int id;
33102 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33103 index 9f2a02c..5920f88 100644
33104 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33105 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33106 @@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33107 struct dib0700_adapter_state {
33108 int (*set_param_save) (struct dvb_frontend *,
33109 struct dvb_frontend_parameters *);
33110 -};
33111 +} __no_const;
33112
33113 static int dib7070_set_param_override(struct dvb_frontend *fe,
33114 struct dvb_frontend_parameters *fep)
33115 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33116 index f103ec1..5e8968b 100644
33117 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33118 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33119 @@ -95,7 +95,7 @@ struct su3000_state {
33120
33121 struct s6x0_state {
33122 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33123 -};
33124 +} __no_const;
33125
33126 /* debug */
33127 static int dvb_usb_dw2102_debug;
33128 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33129 index 404f63a..4796533 100644
33130 --- a/drivers/media/dvb/frontends/dib3000.h
33131 +++ b/drivers/media/dvb/frontends/dib3000.h
33132 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33133 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33134 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33135 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33136 -};
33137 +} __no_const;
33138
33139 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33140 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33141 diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
33142 index 90bf573..e8463da 100644
33143 --- a/drivers/media/dvb/frontends/ds3000.c
33144 +++ b/drivers/media/dvb/frontends/ds3000.c
33145 @@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
33146
33147 for (i = 0; i < 30 ; i++) {
33148 ds3000_read_status(fe, &status);
33149 - if (status && FE_HAS_LOCK)
33150 + if (status & FE_HAS_LOCK)
33151 break;
33152
33153 msleep(10);
33154 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33155 index 0564192..75b16f5 100644
33156 --- a/drivers/media/dvb/ngene/ngene-cards.c
33157 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33158 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
33159
33160 /****************************************************************************/
33161
33162 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33163 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33164 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33165 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33166 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33167 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33168 index 16a089f..ab1667d 100644
33169 --- a/drivers/media/radio/radio-cadet.c
33170 +++ b/drivers/media/radio/radio-cadet.c
33171 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33172 unsigned char readbuf[RDS_BUFFER];
33173 int i = 0;
33174
33175 + if (count > RDS_BUFFER)
33176 + return -EFAULT;
33177 mutex_lock(&dev->lock);
33178 if (dev->rdsstat == 0) {
33179 dev->rdsstat = 1;
33180 diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
33181 index 61287fc..8b08712 100644
33182 --- a/drivers/media/rc/redrat3.c
33183 +++ b/drivers/media/rc/redrat3.c
33184 @@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
33185 return carrier;
33186 }
33187
33188 -static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
33189 +static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
33190 {
33191 struct redrat3_dev *rr3 = rcdev->priv;
33192 struct device *dev = rr3->dev;
33193 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33194 index 9cde353..8c6a1c3 100644
33195 --- a/drivers/media/video/au0828/au0828.h
33196 +++ b/drivers/media/video/au0828/au0828.h
33197 @@ -191,7 +191,7 @@ struct au0828_dev {
33198
33199 /* I2C */
33200 struct i2c_adapter i2c_adap;
33201 - struct i2c_algorithm i2c_algo;
33202 + i2c_algorithm_no_const i2c_algo;
33203 struct i2c_client i2c_client;
33204 u32 i2c_rc;
33205
33206 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33207 index 68d1240..46b32eb 100644
33208 --- a/drivers/media/video/cx88/cx88-alsa.c
33209 +++ b/drivers/media/video/cx88/cx88-alsa.c
33210 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33211 * Only boards with eeprom and byte 1 at eeprom=1 have it
33212 */
33213
33214 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33215 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33216 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33217 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33218 {0, }
33219 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33220 index ee0d0b3..7db1a4f 100644
33221 --- a/drivers/media/video/omap/omap_vout.c
33222 +++ b/drivers/media/video/omap/omap_vout.c
33223 @@ -64,7 +64,12 @@ enum omap_vout_channels {
33224 OMAP_VIDEO2,
33225 };
33226
33227 -static struct videobuf_queue_ops video_vbq_ops;
33228 +static struct videobuf_queue_ops video_vbq_ops = {
33229 + .buf_setup = omap_vout_buffer_setup,
33230 + .buf_prepare = omap_vout_buffer_prepare,
33231 + .buf_release = omap_vout_buffer_release,
33232 + .buf_queue = omap_vout_buffer_queue,
33233 +};
33234 /* Variables configurable through module params*/
33235 static u32 video1_numbuffers = 3;
33236 static u32 video2_numbuffers = 3;
33237 @@ -1016,10 +1021,6 @@ static int omap_vout_open(struct file *file)
33238 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33239
33240 q = &vout->vbq;
33241 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33242 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33243 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33244 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33245 spin_lock_init(&vout->vbq_lock);
33246
33247 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33248 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33249 index 305e6aa..0143317 100644
33250 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33251 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33252 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33253
33254 /* I2C stuff */
33255 struct i2c_adapter i2c_adap;
33256 - struct i2c_algorithm i2c_algo;
33257 + i2c_algorithm_no_const i2c_algo;
33258 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33259 int i2c_cx25840_hack_state;
33260 int i2c_linked;
33261 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33262 index a0895bf..b7ebb1b 100644
33263 --- a/drivers/media/video/timblogiw.c
33264 +++ b/drivers/media/video/timblogiw.c
33265 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33266
33267 /* Platform device functions */
33268
33269 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33270 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33271 .vidioc_querycap = timblogiw_querycap,
33272 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33273 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33274 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33275 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33276 };
33277
33278 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33279 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33280 .owner = THIS_MODULE,
33281 .open = timblogiw_open,
33282 .release = timblogiw_close,
33283 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33284 index e9c6a60..daf6a33 100644
33285 --- a/drivers/message/fusion/mptbase.c
33286 +++ b/drivers/message/fusion/mptbase.c
33287 @@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33288 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33289 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33290
33291 +#ifdef CONFIG_GRKERNSEC_HIDESYM
33292 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33293 +#else
33294 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33295 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33296 +#endif
33297 +
33298 /*
33299 * Rounding UP to nearest 4-kB boundary here...
33300 */
33301 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33302 index 9d95042..b808101 100644
33303 --- a/drivers/message/fusion/mptsas.c
33304 +++ b/drivers/message/fusion/mptsas.c
33305 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33306 return 0;
33307 }
33308
33309 +static inline void
33310 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33311 +{
33312 + if (phy_info->port_details) {
33313 + phy_info->port_details->rphy = rphy;
33314 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33315 + ioc->name, rphy));
33316 + }
33317 +
33318 + if (rphy) {
33319 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33320 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33321 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33322 + ioc->name, rphy, rphy->dev.release));
33323 + }
33324 +}
33325 +
33326 /* no mutex */
33327 static void
33328 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33329 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33330 return NULL;
33331 }
33332
33333 -static inline void
33334 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33335 -{
33336 - if (phy_info->port_details) {
33337 - phy_info->port_details->rphy = rphy;
33338 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33339 - ioc->name, rphy));
33340 - }
33341 -
33342 - if (rphy) {
33343 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33344 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33345 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33346 - ioc->name, rphy, rphy->dev.release));
33347 - }
33348 -}
33349 -
33350 static inline struct sas_port *
33351 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33352 {
33353 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
33354 index 0c3ced7..1fe34ec 100644
33355 --- a/drivers/message/fusion/mptscsih.c
33356 +++ b/drivers/message/fusion/mptscsih.c
33357 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33358
33359 h = shost_priv(SChost);
33360
33361 - if (h) {
33362 - if (h->info_kbuf == NULL)
33363 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33364 - return h->info_kbuf;
33365 - h->info_kbuf[0] = '\0';
33366 + if (!h)
33367 + return NULL;
33368
33369 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33370 - h->info_kbuf[size-1] = '\0';
33371 - }
33372 + if (h->info_kbuf == NULL)
33373 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33374 + return h->info_kbuf;
33375 + h->info_kbuf[0] = '\0';
33376 +
33377 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33378 + h->info_kbuf[size-1] = '\0';
33379
33380 return h->info_kbuf;
33381 }
33382 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
33383 index 07dbeaf..5533142 100644
33384 --- a/drivers/message/i2o/i2o_proc.c
33385 +++ b/drivers/message/i2o/i2o_proc.c
33386 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
33387 "Array Controller Device"
33388 };
33389
33390 -static char *chtostr(u8 * chars, int n)
33391 -{
33392 - char tmp[256];
33393 - tmp[0] = 0;
33394 - return strncat(tmp, (char *)chars, n);
33395 -}
33396 -
33397 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33398 char *group)
33399 {
33400 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
33401
33402 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33403 seq_printf(seq, "%-#8x", ddm_table.module_id);
33404 - seq_printf(seq, "%-29s",
33405 - chtostr(ddm_table.module_name_version, 28));
33406 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33407 seq_printf(seq, "%9d ", ddm_table.data_size);
33408 seq_printf(seq, "%8d", ddm_table.code_size);
33409
33410 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
33411
33412 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33413 seq_printf(seq, "%-#8x", dst->module_id);
33414 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33415 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33416 + seq_printf(seq, "%-.28s", dst->module_name_version);
33417 + seq_printf(seq, "%-.8s", dst->date);
33418 seq_printf(seq, "%8d ", dst->module_size);
33419 seq_printf(seq, "%8d ", dst->mpb_size);
33420 seq_printf(seq, "0x%04x", dst->module_flags);
33421 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
33422 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33423 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33424 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33425 - seq_printf(seq, "Vendor info : %s\n",
33426 - chtostr((u8 *) (work32 + 2), 16));
33427 - seq_printf(seq, "Product info : %s\n",
33428 - chtostr((u8 *) (work32 + 6), 16));
33429 - seq_printf(seq, "Description : %s\n",
33430 - chtostr((u8 *) (work32 + 10), 16));
33431 - seq_printf(seq, "Product rev. : %s\n",
33432 - chtostr((u8 *) (work32 + 14), 8));
33433 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33434 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33435 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33436 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33437
33438 seq_printf(seq, "Serial number : ");
33439 print_serial_number(seq, (u8 *) (work32 + 16),
33440 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
33441 }
33442
33443 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33444 - seq_printf(seq, "Module name : %s\n",
33445 - chtostr(result.module_name, 24));
33446 - seq_printf(seq, "Module revision : %s\n",
33447 - chtostr(result.module_rev, 8));
33448 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
33449 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33450
33451 seq_printf(seq, "Serial number : ");
33452 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33453 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
33454 return 0;
33455 }
33456
33457 - seq_printf(seq, "Device name : %s\n",
33458 - chtostr(result.device_name, 64));
33459 - seq_printf(seq, "Service name : %s\n",
33460 - chtostr(result.service_name, 64));
33461 - seq_printf(seq, "Physical name : %s\n",
33462 - chtostr(result.physical_location, 64));
33463 - seq_printf(seq, "Instance number : %s\n",
33464 - chtostr(result.instance_number, 4));
33465 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
33466 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
33467 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33468 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33469
33470 return 0;
33471 }
33472 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
33473 index a8c08f3..155fe3d 100644
33474 --- a/drivers/message/i2o/iop.c
33475 +++ b/drivers/message/i2o/iop.c
33476 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
33477
33478 spin_lock_irqsave(&c->context_list_lock, flags);
33479
33480 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33481 - atomic_inc(&c->context_list_counter);
33482 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33483 + atomic_inc_unchecked(&c->context_list_counter);
33484
33485 - entry->context = atomic_read(&c->context_list_counter);
33486 + entry->context = atomic_read_unchecked(&c->context_list_counter);
33487
33488 list_add(&entry->list, &c->context_list);
33489
33490 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
33491
33492 #if BITS_PER_LONG == 64
33493 spin_lock_init(&c->context_list_lock);
33494 - atomic_set(&c->context_list_counter, 0);
33495 + atomic_set_unchecked(&c->context_list_counter, 0);
33496 INIT_LIST_HEAD(&c->context_list);
33497 #endif
33498
33499 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
33500 index 7ce65f4..e66e9bc 100644
33501 --- a/drivers/mfd/abx500-core.c
33502 +++ b/drivers/mfd/abx500-core.c
33503 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
33504
33505 struct abx500_device_entry {
33506 struct list_head list;
33507 - struct abx500_ops ops;
33508 + abx500_ops_no_const ops;
33509 struct device *dev;
33510 };
33511
33512 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
33513 index 5c2a06a..8fa077c 100644
33514 --- a/drivers/mfd/janz-cmodio.c
33515 +++ b/drivers/mfd/janz-cmodio.c
33516 @@ -13,6 +13,7 @@
33517
33518 #include <linux/kernel.h>
33519 #include <linux/module.h>
33520 +#include <linux/slab.h>
33521 #include <linux/init.h>
33522 #include <linux/pci.h>
33523 #include <linux/interrupt.h>
33524 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
33525 index 29d12a7..f900ba4 100644
33526 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
33527 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
33528 @@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
33529 * the lid is closed. This leads to interrupts as soon as a little move
33530 * is done.
33531 */
33532 - atomic_inc(&lis3->count);
33533 + atomic_inc_unchecked(&lis3->count);
33534
33535 wake_up_interruptible(&lis3->misc_wait);
33536 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
33537 @@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33538 if (lis3->pm_dev)
33539 pm_runtime_get_sync(lis3->pm_dev);
33540
33541 - atomic_set(&lis3->count, 0);
33542 + atomic_set_unchecked(&lis3->count, 0);
33543 return 0;
33544 }
33545
33546 @@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33547 add_wait_queue(&lis3->misc_wait, &wait);
33548 while (true) {
33549 set_current_state(TASK_INTERRUPTIBLE);
33550 - data = atomic_xchg(&lis3->count, 0);
33551 + data = atomic_xchg_unchecked(&lis3->count, 0);
33552 if (data)
33553 break;
33554
33555 @@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33556 struct lis3lv02d, miscdev);
33557
33558 poll_wait(file, &lis3->misc_wait, wait);
33559 - if (atomic_read(&lis3->count))
33560 + if (atomic_read_unchecked(&lis3->count))
33561 return POLLIN | POLLRDNORM;
33562 return 0;
33563 }
33564 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
33565 index 2b1482a..5d33616 100644
33566 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
33567 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
33568 @@ -266,7 +266,7 @@ struct lis3lv02d {
33569 struct input_polled_dev *idev; /* input device */
33570 struct platform_device *pdev; /* platform device */
33571 struct regulator_bulk_data regulators[2];
33572 - atomic_t count; /* interrupt count after last read */
33573 + atomic_unchecked_t count; /* interrupt count after last read */
33574 union axis_conversion ac; /* hw -> logical axis */
33575 int mapped_btns[3];
33576
33577 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
33578 index 2f30bad..c4c13d0 100644
33579 --- a/drivers/misc/sgi-gru/gruhandles.c
33580 +++ b/drivers/misc/sgi-gru/gruhandles.c
33581 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33582 unsigned long nsec;
33583
33584 nsec = CLKS2NSEC(clks);
33585 - atomic_long_inc(&mcs_op_statistics[op].count);
33586 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
33587 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33588 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
33589 if (mcs_op_statistics[op].max < nsec)
33590 mcs_op_statistics[op].max = nsec;
33591 }
33592 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
33593 index 7768b87..f8aac38 100644
33594 --- a/drivers/misc/sgi-gru/gruprocfs.c
33595 +++ b/drivers/misc/sgi-gru/gruprocfs.c
33596 @@ -32,9 +32,9 @@
33597
33598 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33599
33600 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33601 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33602 {
33603 - unsigned long val = atomic_long_read(v);
33604 + unsigned long val = atomic_long_read_unchecked(v);
33605
33606 seq_printf(s, "%16lu %s\n", val, id);
33607 }
33608 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
33609
33610 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
33611 for (op = 0; op < mcsop_last; op++) {
33612 - count = atomic_long_read(&mcs_op_statistics[op].count);
33613 - total = atomic_long_read(&mcs_op_statistics[op].total);
33614 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33615 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33616 max = mcs_op_statistics[op].max;
33617 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33618 count ? total / count : 0, max);
33619 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
33620 index 5c3ce24..4915ccb 100644
33621 --- a/drivers/misc/sgi-gru/grutables.h
33622 +++ b/drivers/misc/sgi-gru/grutables.h
33623 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
33624 * GRU statistics.
33625 */
33626 struct gru_stats_s {
33627 - atomic_long_t vdata_alloc;
33628 - atomic_long_t vdata_free;
33629 - atomic_long_t gts_alloc;
33630 - atomic_long_t gts_free;
33631 - atomic_long_t gms_alloc;
33632 - atomic_long_t gms_free;
33633 - atomic_long_t gts_double_allocate;
33634 - atomic_long_t assign_context;
33635 - atomic_long_t assign_context_failed;
33636 - atomic_long_t free_context;
33637 - atomic_long_t load_user_context;
33638 - atomic_long_t load_kernel_context;
33639 - atomic_long_t lock_kernel_context;
33640 - atomic_long_t unlock_kernel_context;
33641 - atomic_long_t steal_user_context;
33642 - atomic_long_t steal_kernel_context;
33643 - atomic_long_t steal_context_failed;
33644 - atomic_long_t nopfn;
33645 - atomic_long_t asid_new;
33646 - atomic_long_t asid_next;
33647 - atomic_long_t asid_wrap;
33648 - atomic_long_t asid_reuse;
33649 - atomic_long_t intr;
33650 - atomic_long_t intr_cbr;
33651 - atomic_long_t intr_tfh;
33652 - atomic_long_t intr_spurious;
33653 - atomic_long_t intr_mm_lock_failed;
33654 - atomic_long_t call_os;
33655 - atomic_long_t call_os_wait_queue;
33656 - atomic_long_t user_flush_tlb;
33657 - atomic_long_t user_unload_context;
33658 - atomic_long_t user_exception;
33659 - atomic_long_t set_context_option;
33660 - atomic_long_t check_context_retarget_intr;
33661 - atomic_long_t check_context_unload;
33662 - atomic_long_t tlb_dropin;
33663 - atomic_long_t tlb_preload_page;
33664 - atomic_long_t tlb_dropin_fail_no_asid;
33665 - atomic_long_t tlb_dropin_fail_upm;
33666 - atomic_long_t tlb_dropin_fail_invalid;
33667 - atomic_long_t tlb_dropin_fail_range_active;
33668 - atomic_long_t tlb_dropin_fail_idle;
33669 - atomic_long_t tlb_dropin_fail_fmm;
33670 - atomic_long_t tlb_dropin_fail_no_exception;
33671 - atomic_long_t tfh_stale_on_fault;
33672 - atomic_long_t mmu_invalidate_range;
33673 - atomic_long_t mmu_invalidate_page;
33674 - atomic_long_t flush_tlb;
33675 - atomic_long_t flush_tlb_gru;
33676 - atomic_long_t flush_tlb_gru_tgh;
33677 - atomic_long_t flush_tlb_gru_zero_asid;
33678 + atomic_long_unchecked_t vdata_alloc;
33679 + atomic_long_unchecked_t vdata_free;
33680 + atomic_long_unchecked_t gts_alloc;
33681 + atomic_long_unchecked_t gts_free;
33682 + atomic_long_unchecked_t gms_alloc;
33683 + atomic_long_unchecked_t gms_free;
33684 + atomic_long_unchecked_t gts_double_allocate;
33685 + atomic_long_unchecked_t assign_context;
33686 + atomic_long_unchecked_t assign_context_failed;
33687 + atomic_long_unchecked_t free_context;
33688 + atomic_long_unchecked_t load_user_context;
33689 + atomic_long_unchecked_t load_kernel_context;
33690 + atomic_long_unchecked_t lock_kernel_context;
33691 + atomic_long_unchecked_t unlock_kernel_context;
33692 + atomic_long_unchecked_t steal_user_context;
33693 + atomic_long_unchecked_t steal_kernel_context;
33694 + atomic_long_unchecked_t steal_context_failed;
33695 + atomic_long_unchecked_t nopfn;
33696 + atomic_long_unchecked_t asid_new;
33697 + atomic_long_unchecked_t asid_next;
33698 + atomic_long_unchecked_t asid_wrap;
33699 + atomic_long_unchecked_t asid_reuse;
33700 + atomic_long_unchecked_t intr;
33701 + atomic_long_unchecked_t intr_cbr;
33702 + atomic_long_unchecked_t intr_tfh;
33703 + atomic_long_unchecked_t intr_spurious;
33704 + atomic_long_unchecked_t intr_mm_lock_failed;
33705 + atomic_long_unchecked_t call_os;
33706 + atomic_long_unchecked_t call_os_wait_queue;
33707 + atomic_long_unchecked_t user_flush_tlb;
33708 + atomic_long_unchecked_t user_unload_context;
33709 + atomic_long_unchecked_t user_exception;
33710 + atomic_long_unchecked_t set_context_option;
33711 + atomic_long_unchecked_t check_context_retarget_intr;
33712 + atomic_long_unchecked_t check_context_unload;
33713 + atomic_long_unchecked_t tlb_dropin;
33714 + atomic_long_unchecked_t tlb_preload_page;
33715 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33716 + atomic_long_unchecked_t tlb_dropin_fail_upm;
33717 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
33718 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
33719 + atomic_long_unchecked_t tlb_dropin_fail_idle;
33720 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
33721 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33722 + atomic_long_unchecked_t tfh_stale_on_fault;
33723 + atomic_long_unchecked_t mmu_invalidate_range;
33724 + atomic_long_unchecked_t mmu_invalidate_page;
33725 + atomic_long_unchecked_t flush_tlb;
33726 + atomic_long_unchecked_t flush_tlb_gru;
33727 + atomic_long_unchecked_t flush_tlb_gru_tgh;
33728 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33729
33730 - atomic_long_t copy_gpa;
33731 - atomic_long_t read_gpa;
33732 + atomic_long_unchecked_t copy_gpa;
33733 + atomic_long_unchecked_t read_gpa;
33734
33735 - atomic_long_t mesq_receive;
33736 - atomic_long_t mesq_receive_none;
33737 - atomic_long_t mesq_send;
33738 - atomic_long_t mesq_send_failed;
33739 - atomic_long_t mesq_noop;
33740 - atomic_long_t mesq_send_unexpected_error;
33741 - atomic_long_t mesq_send_lb_overflow;
33742 - atomic_long_t mesq_send_qlimit_reached;
33743 - atomic_long_t mesq_send_amo_nacked;
33744 - atomic_long_t mesq_send_put_nacked;
33745 - atomic_long_t mesq_page_overflow;
33746 - atomic_long_t mesq_qf_locked;
33747 - atomic_long_t mesq_qf_noop_not_full;
33748 - atomic_long_t mesq_qf_switch_head_failed;
33749 - atomic_long_t mesq_qf_unexpected_error;
33750 - atomic_long_t mesq_noop_unexpected_error;
33751 - atomic_long_t mesq_noop_lb_overflow;
33752 - atomic_long_t mesq_noop_qlimit_reached;
33753 - atomic_long_t mesq_noop_amo_nacked;
33754 - atomic_long_t mesq_noop_put_nacked;
33755 - atomic_long_t mesq_noop_page_overflow;
33756 + atomic_long_unchecked_t mesq_receive;
33757 + atomic_long_unchecked_t mesq_receive_none;
33758 + atomic_long_unchecked_t mesq_send;
33759 + atomic_long_unchecked_t mesq_send_failed;
33760 + atomic_long_unchecked_t mesq_noop;
33761 + atomic_long_unchecked_t mesq_send_unexpected_error;
33762 + atomic_long_unchecked_t mesq_send_lb_overflow;
33763 + atomic_long_unchecked_t mesq_send_qlimit_reached;
33764 + atomic_long_unchecked_t mesq_send_amo_nacked;
33765 + atomic_long_unchecked_t mesq_send_put_nacked;
33766 + atomic_long_unchecked_t mesq_page_overflow;
33767 + atomic_long_unchecked_t mesq_qf_locked;
33768 + atomic_long_unchecked_t mesq_qf_noop_not_full;
33769 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
33770 + atomic_long_unchecked_t mesq_qf_unexpected_error;
33771 + atomic_long_unchecked_t mesq_noop_unexpected_error;
33772 + atomic_long_unchecked_t mesq_noop_lb_overflow;
33773 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
33774 + atomic_long_unchecked_t mesq_noop_amo_nacked;
33775 + atomic_long_unchecked_t mesq_noop_put_nacked;
33776 + atomic_long_unchecked_t mesq_noop_page_overflow;
33777
33778 };
33779
33780 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
33781 tghop_invalidate, mcsop_last};
33782
33783 struct mcs_op_statistic {
33784 - atomic_long_t count;
33785 - atomic_long_t total;
33786 + atomic_long_unchecked_t count;
33787 + atomic_long_unchecked_t total;
33788 unsigned long max;
33789 };
33790
33791 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
33792
33793 #define STAT(id) do { \
33794 if (gru_options & OPT_STATS) \
33795 - atomic_long_inc(&gru_stats.id); \
33796 + atomic_long_inc_unchecked(&gru_stats.id); \
33797 } while (0)
33798
33799 #ifdef CONFIG_SGI_GRU_DEBUG
33800 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
33801 index 851b2f2..a4ec097 100644
33802 --- a/drivers/misc/sgi-xp/xp.h
33803 +++ b/drivers/misc/sgi-xp/xp.h
33804 @@ -289,7 +289,7 @@ struct xpc_interface {
33805 xpc_notify_func, void *);
33806 void (*received) (short, int, void *);
33807 enum xp_retval (*partid_to_nasids) (short, void *);
33808 -};
33809 +} __no_const;
33810
33811 extern struct xpc_interface xpc_interface;
33812
33813 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
33814 index b94d5f7..7f494c5 100644
33815 --- a/drivers/misc/sgi-xp/xpc.h
33816 +++ b/drivers/misc/sgi-xp/xpc.h
33817 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
33818 void (*received_payload) (struct xpc_channel *, void *);
33819 void (*notify_senders_of_disconnect) (struct xpc_channel *);
33820 };
33821 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
33822
33823 /* struct xpc_partition act_state values (for XPC HB) */
33824
33825 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
33826 /* found in xpc_main.c */
33827 extern struct device *xpc_part;
33828 extern struct device *xpc_chan;
33829 -extern struct xpc_arch_operations xpc_arch_ops;
33830 +extern xpc_arch_operations_no_const xpc_arch_ops;
33831 extern int xpc_disengage_timelimit;
33832 extern int xpc_disengage_timedout;
33833 extern int xpc_activate_IRQ_rcvd;
33834 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
33835 index 8d082b4..aa749ae 100644
33836 --- a/drivers/misc/sgi-xp/xpc_main.c
33837 +++ b/drivers/misc/sgi-xp/xpc_main.c
33838 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
33839 .notifier_call = xpc_system_die,
33840 };
33841
33842 -struct xpc_arch_operations xpc_arch_ops;
33843 +xpc_arch_operations_no_const xpc_arch_ops;
33844
33845 /*
33846 * Timer function to enforce the timelimit on the partition disengage.
33847 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
33848 index 6878a94..fe5c5f1 100644
33849 --- a/drivers/mmc/host/sdhci-pci.c
33850 +++ b/drivers/mmc/host/sdhci-pci.c
33851 @@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
33852 .probe = via_probe,
33853 };
33854
33855 -static const struct pci_device_id pci_ids[] __devinitdata = {
33856 +static const struct pci_device_id pci_ids[] __devinitconst = {
33857 {
33858 .vendor = PCI_VENDOR_ID_RICOH,
33859 .device = PCI_DEVICE_ID_RICOH_R5C822,
33860 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
33861 index e9fad91..0a7a16a 100644
33862 --- a/drivers/mtd/devices/doc2000.c
33863 +++ b/drivers/mtd/devices/doc2000.c
33864 @@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
33865
33866 /* The ECC will not be calculated correctly if less than 512 is written */
33867 /* DBB-
33868 - if (len != 0x200 && eccbuf)
33869 + if (len != 0x200)
33870 printk(KERN_WARNING
33871 "ECC needs a full sector write (adr: %lx size %lx)\n",
33872 (long) to, (long) len);
33873 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
33874 index a3f7a27..234016e 100644
33875 --- a/drivers/mtd/devices/doc2001.c
33876 +++ b/drivers/mtd/devices/doc2001.c
33877 @@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
33878 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33879
33880 /* Don't allow read past end of device */
33881 - if (from >= this->totlen)
33882 + if (from >= this->totlen || !len)
33883 return -EINVAL;
33884
33885 /* Don't allow a single read to cross a 512-byte block boundary */
33886 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
33887 index 3984d48..28aa897 100644
33888 --- a/drivers/mtd/nand/denali.c
33889 +++ b/drivers/mtd/nand/denali.c
33890 @@ -26,6 +26,7 @@
33891 #include <linux/pci.h>
33892 #include <linux/mtd/mtd.h>
33893 #include <linux/module.h>
33894 +#include <linux/slab.h>
33895
33896 #include "denali.h"
33897
33898 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
33899 index ac40925..483b753 100644
33900 --- a/drivers/mtd/nftlmount.c
33901 +++ b/drivers/mtd/nftlmount.c
33902 @@ -24,6 +24,7 @@
33903 #include <asm/errno.h>
33904 #include <linux/delay.h>
33905 #include <linux/slab.h>
33906 +#include <linux/sched.h>
33907 #include <linux/mtd/mtd.h>
33908 #include <linux/mtd/nand.h>
33909 #include <linux/mtd/nftl.h>
33910 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
33911 index 6c3fb5a..c542a81 100644
33912 --- a/drivers/mtd/ubi/build.c
33913 +++ b/drivers/mtd/ubi/build.c
33914 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
33915 static int __init bytes_str_to_int(const char *str)
33916 {
33917 char *endp;
33918 - unsigned long result;
33919 + unsigned long result, scale = 1;
33920
33921 result = simple_strtoul(str, &endp, 0);
33922 if (str == endp || result >= INT_MAX) {
33923 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
33924
33925 switch (*endp) {
33926 case 'G':
33927 - result *= 1024;
33928 + scale *= 1024;
33929 case 'M':
33930 - result *= 1024;
33931 + scale *= 1024;
33932 case 'K':
33933 - result *= 1024;
33934 + scale *= 1024;
33935 if (endp[1] == 'i' && endp[2] == 'B')
33936 endp += 2;
33937 case '\0':
33938 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
33939 return -EINVAL;
33940 }
33941
33942 - return result;
33943 + if ((intoverflow_t)result*scale >= INT_MAX) {
33944 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33945 + str);
33946 + return -EINVAL;
33947 + }
33948 +
33949 + return result*scale;
33950 }
33951
33952 /**
33953 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
33954 index 1feae59..c2a61d2 100644
33955 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
33956 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
33957 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
33958 */
33959
33960 #define ATL2_PARAM(X, desc) \
33961 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33962 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
33963 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
33964 MODULE_PARM_DESC(X, desc);
33965 #else
33966 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33967 index 9a517c2..a50cfcb 100644
33968 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33969 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
33970 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
33971
33972 int (*wait_comp)(struct bnx2x *bp,
33973 struct bnx2x_rx_mode_ramrod_params *p);
33974 -};
33975 +} __no_const;
33976
33977 /********************** Set multicast group ***********************************/
33978
33979 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
33980 index 94b4bd0..73c02de 100644
33981 --- a/drivers/net/ethernet/broadcom/tg3.h
33982 +++ b/drivers/net/ethernet/broadcom/tg3.h
33983 @@ -134,6 +134,7 @@
33984 #define CHIPREV_ID_5750_A0 0x4000
33985 #define CHIPREV_ID_5750_A1 0x4001
33986 #define CHIPREV_ID_5750_A3 0x4003
33987 +#define CHIPREV_ID_5750_C1 0x4201
33988 #define CHIPREV_ID_5750_C2 0x4202
33989 #define CHIPREV_ID_5752_A0_HW 0x5000
33990 #define CHIPREV_ID_5752_A0 0x6000
33991 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33992 index c5f5479..2e8c260 100644
33993 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33994 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
33995 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
33996 */
33997 struct l2t_skb_cb {
33998 arp_failure_handler_func arp_failure_handler;
33999 -};
34000 +} __no_const;
34001
34002 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34003
34004 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34005 index 871bcaa..4043505 100644
34006 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34007 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34008 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34009 for (i=0; i<ETH_ALEN; i++) {
34010 tmp.addr[i] = dev->dev_addr[i];
34011 }
34012 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34013 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34014 break;
34015
34016 case DE4X5_SET_HWADDR: /* Set the hardware address */
34017 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34018 spin_lock_irqsave(&lp->lock, flags);
34019 memcpy(&statbuf, &lp->pktStats, ioc->len);
34020 spin_unlock_irqrestore(&lp->lock, flags);
34021 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34022 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34023 return -EFAULT;
34024 break;
34025 }
34026 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34027 index 14d5b61..1398636 100644
34028 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34029 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34030 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34031 {NULL}};
34032
34033
34034 -static const char *block_name[] __devinitdata = {
34035 +static const char *block_name[] __devinitconst = {
34036 "21140 non-MII",
34037 "21140 MII PHY",
34038 "21142 Serial PHY",
34039 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34040 index 4d01219..b58d26d 100644
34041 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34042 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34043 @@ -236,7 +236,7 @@ struct pci_id_info {
34044 int drv_flags; /* Driver use, intended as capability flags. */
34045 };
34046
34047 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34048 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34049 { /* Sometime a Level-One switch card. */
34050 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34051 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34052 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34053 index dcd7f7a..ecb7fb3 100644
34054 --- a/drivers/net/ethernet/dlink/sundance.c
34055 +++ b/drivers/net/ethernet/dlink/sundance.c
34056 @@ -218,7 +218,7 @@ enum {
34057 struct pci_id_info {
34058 const char *name;
34059 };
34060 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34061 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34062 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34063 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34064 {"D-Link DFE-580TX 4 port Server Adapter"},
34065 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34066 index bf266a0..e024af7 100644
34067 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34068 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34069 @@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34070
34071 if (wrapped)
34072 newacc += 65536;
34073 - ACCESS_ONCE(*acc) = newacc;
34074 + ACCESS_ONCE_RW(*acc) = newacc;
34075 }
34076
34077 void be_parse_stats(struct be_adapter *adapter)
34078 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34079 index fb5579a..debdffa 100644
34080 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34081 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34082 @@ -30,6 +30,8 @@
34083 #include <linux/netdevice.h>
34084 #include <linux/phy.h>
34085 #include <linux/platform_device.h>
34086 +#include <linux/interrupt.h>
34087 +#include <linux/irqreturn.h>
34088 #include <net/ip.h>
34089
34090 #include "ftgmac100.h"
34091 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34092 index a127cb2..0d043cd 100644
34093 --- a/drivers/net/ethernet/faraday/ftmac100.c
34094 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34095 @@ -30,6 +30,8 @@
34096 #include <linux/module.h>
34097 #include <linux/netdevice.h>
34098 #include <linux/platform_device.h>
34099 +#include <linux/interrupt.h>
34100 +#include <linux/irqreturn.h>
34101
34102 #include "ftmac100.h"
34103
34104 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34105 index 61d2bdd..7f1154a 100644
34106 --- a/drivers/net/ethernet/fealnx.c
34107 +++ b/drivers/net/ethernet/fealnx.c
34108 @@ -150,7 +150,7 @@ struct chip_info {
34109 int flags;
34110 };
34111
34112 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34113 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34114 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34115 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34116 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34117 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34118 index e1159e5..e18684d 100644
34119 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34120 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34121 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
34122 {
34123 struct e1000_hw *hw = &adapter->hw;
34124 struct e1000_mac_info *mac = &hw->mac;
34125 - struct e1000_mac_operations *func = &mac->ops;
34126 + e1000_mac_operations_no_const *func = &mac->ops;
34127
34128 /* Set media type */
34129 switch (adapter->pdev->device) {
34130 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
34131 index a3e65fd..f451444 100644
34132 --- a/drivers/net/ethernet/intel/e1000e/82571.c
34133 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
34134 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
34135 {
34136 struct e1000_hw *hw = &adapter->hw;
34137 struct e1000_mac_info *mac = &hw->mac;
34138 - struct e1000_mac_operations *func = &mac->ops;
34139 + e1000_mac_operations_no_const *func = &mac->ops;
34140 u32 swsm = 0;
34141 u32 swsm2 = 0;
34142 bool force_clear_smbi = false;
34143 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34144 index 2967039..ca8c40c 100644
34145 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34146 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34147 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
34148 void (*write_vfta)(struct e1000_hw *, u32, u32);
34149 s32 (*read_mac_addr)(struct e1000_hw *);
34150 };
34151 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34152
34153 /*
34154 * When to use various PHY register access functions:
34155 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
34156 void (*power_up)(struct e1000_hw *);
34157 void (*power_down)(struct e1000_hw *);
34158 };
34159 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34160
34161 /* Function pointers for the NVM. */
34162 struct e1000_nvm_operations {
34163 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
34164 s32 (*validate)(struct e1000_hw *);
34165 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34166 };
34167 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34168
34169 struct e1000_mac_info {
34170 - struct e1000_mac_operations ops;
34171 + e1000_mac_operations_no_const ops;
34172 u8 addr[ETH_ALEN];
34173 u8 perm_addr[ETH_ALEN];
34174
34175 @@ -872,7 +875,7 @@ struct e1000_mac_info {
34176 };
34177
34178 struct e1000_phy_info {
34179 - struct e1000_phy_operations ops;
34180 + e1000_phy_operations_no_const ops;
34181
34182 enum e1000_phy_type type;
34183
34184 @@ -906,7 +909,7 @@ struct e1000_phy_info {
34185 };
34186
34187 struct e1000_nvm_info {
34188 - struct e1000_nvm_operations ops;
34189 + e1000_nvm_operations_no_const ops;
34190
34191 enum e1000_nvm_type type;
34192 enum e1000_nvm_override override;
34193 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34194 index 4519a13..f97fcd0 100644
34195 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34196 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34197 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34198 s32 (*read_mac_addr)(struct e1000_hw *);
34199 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34200 };
34201 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34202
34203 struct e1000_phy_operations {
34204 s32 (*acquire)(struct e1000_hw *);
34205 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34206 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34207 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34208 };
34209 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34210
34211 struct e1000_nvm_operations {
34212 s32 (*acquire)(struct e1000_hw *);
34213 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34214 s32 (*update)(struct e1000_hw *);
34215 s32 (*validate)(struct e1000_hw *);
34216 };
34217 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34218
34219 struct e1000_info {
34220 s32 (*get_invariants)(struct e1000_hw *);
34221 @@ -350,7 +353,7 @@ struct e1000_info {
34222 extern const struct e1000_info e1000_82575_info;
34223
34224 struct e1000_mac_info {
34225 - struct e1000_mac_operations ops;
34226 + e1000_mac_operations_no_const ops;
34227
34228 u8 addr[6];
34229 u8 perm_addr[6];
34230 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34231 };
34232
34233 struct e1000_phy_info {
34234 - struct e1000_phy_operations ops;
34235 + e1000_phy_operations_no_const ops;
34236
34237 enum e1000_phy_type type;
34238
34239 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34240 };
34241
34242 struct e1000_nvm_info {
34243 - struct e1000_nvm_operations ops;
34244 + e1000_nvm_operations_no_const ops;
34245 enum e1000_nvm_type type;
34246 enum e1000_nvm_override override;
34247
34248 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34249 s32 (*check_for_ack)(struct e1000_hw *, u16);
34250 s32 (*check_for_rst)(struct e1000_hw *, u16);
34251 };
34252 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34253
34254 struct e1000_mbx_stats {
34255 u32 msgs_tx;
34256 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34257 };
34258
34259 struct e1000_mbx_info {
34260 - struct e1000_mbx_operations ops;
34261 + e1000_mbx_operations_no_const ops;
34262 struct e1000_mbx_stats stats;
34263 u32 timeout;
34264 u32 usec_delay;
34265 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34266 index d7ed58f..64cde36 100644
34267 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34268 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34269 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34270 s32 (*read_mac_addr)(struct e1000_hw *);
34271 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34272 };
34273 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34274
34275 struct e1000_mac_info {
34276 - struct e1000_mac_operations ops;
34277 + e1000_mac_operations_no_const ops;
34278 u8 addr[6];
34279 u8 perm_addr[6];
34280
34281 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34282 s32 (*check_for_ack)(struct e1000_hw *);
34283 s32 (*check_for_rst)(struct e1000_hw *);
34284 };
34285 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34286
34287 struct e1000_mbx_stats {
34288 u32 msgs_tx;
34289 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34290 };
34291
34292 struct e1000_mbx_info {
34293 - struct e1000_mbx_operations ops;
34294 + e1000_mbx_operations_no_const ops;
34295 struct e1000_mbx_stats stats;
34296 u32 timeout;
34297 u32 usec_delay;
34298 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34299 index 6c5cca8..de8ef63 100644
34300 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34301 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34302 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
34303 s32 (*update_checksum)(struct ixgbe_hw *);
34304 u16 (*calc_checksum)(struct ixgbe_hw *);
34305 };
34306 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34307
34308 struct ixgbe_mac_operations {
34309 s32 (*init_hw)(struct ixgbe_hw *);
34310 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
34311 /* Manageability interface */
34312 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34313 };
34314 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34315
34316 struct ixgbe_phy_operations {
34317 s32 (*identify)(struct ixgbe_hw *);
34318 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
34319 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34320 s32 (*check_overtemp)(struct ixgbe_hw *);
34321 };
34322 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34323
34324 struct ixgbe_eeprom_info {
34325 - struct ixgbe_eeprom_operations ops;
34326 + ixgbe_eeprom_operations_no_const ops;
34327 enum ixgbe_eeprom_type type;
34328 u32 semaphore_delay;
34329 u16 word_size;
34330 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
34331
34332 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34333 struct ixgbe_mac_info {
34334 - struct ixgbe_mac_operations ops;
34335 + ixgbe_mac_operations_no_const ops;
34336 enum ixgbe_mac_type type;
34337 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
34338 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
34339 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
34340 };
34341
34342 struct ixgbe_phy_info {
34343 - struct ixgbe_phy_operations ops;
34344 + ixgbe_phy_operations_no_const ops;
34345 struct mdio_if_info mdio;
34346 enum ixgbe_phy_type type;
34347 u32 id;
34348 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
34349 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
34350 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
34351 };
34352 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34353
34354 struct ixgbe_mbx_stats {
34355 u32 msgs_tx;
34356 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
34357 };
34358
34359 struct ixgbe_mbx_info {
34360 - struct ixgbe_mbx_operations ops;
34361 + ixgbe_mbx_operations_no_const ops;
34362 struct ixgbe_mbx_stats stats;
34363 u32 timeout;
34364 u32 usec_delay;
34365 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
34366 index 10306b4..28df758 100644
34367 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
34368 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
34369 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
34370 s32 (*clear_vfta)(struct ixgbe_hw *);
34371 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
34372 };
34373 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34374
34375 enum ixgbe_mac_type {
34376 ixgbe_mac_unknown = 0,
34377 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
34378 };
34379
34380 struct ixgbe_mac_info {
34381 - struct ixgbe_mac_operations ops;
34382 + ixgbe_mac_operations_no_const ops;
34383 u8 addr[6];
34384 u8 perm_addr[6];
34385
34386 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
34387 s32 (*check_for_ack)(struct ixgbe_hw *);
34388 s32 (*check_for_rst)(struct ixgbe_hw *);
34389 };
34390 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
34391
34392 struct ixgbe_mbx_stats {
34393 u32 msgs_tx;
34394 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
34395 };
34396
34397 struct ixgbe_mbx_info {
34398 - struct ixgbe_mbx_operations ops;
34399 + ixgbe_mbx_operations_no_const ops;
34400 struct ixgbe_mbx_stats stats;
34401 u32 timeout;
34402 u32 udelay;
34403 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
34404 index 94bbc85..78c12e6 100644
34405 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
34406 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
34407 @@ -40,6 +40,7 @@
34408 #include <linux/dma-mapping.h>
34409 #include <linux/slab.h>
34410 #include <linux/io-mapping.h>
34411 +#include <linux/sched.h>
34412
34413 #include <linux/mlx4/device.h>
34414 #include <linux/mlx4/doorbell.h>
34415 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34416 index 5046a64..71ca936 100644
34417 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
34418 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
34419 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34420 void (*link_down)(struct __vxge_hw_device *devh);
34421 void (*crit_err)(struct __vxge_hw_device *devh,
34422 enum vxge_hw_event type, u64 ext_data);
34423 -};
34424 +} __no_const;
34425
34426 /*
34427 * struct __vxge_hw_blockpool_entry - Block private data structure
34428 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34429 index 4a518a3..936b334 100644
34430 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34431 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
34432 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34433 struct vxge_hw_mempool_dma *dma_object,
34434 u32 index,
34435 u32 is_last);
34436 -};
34437 +} __no_const;
34438
34439 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34440 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34441 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
34442 index c8f47f1..5da9840 100644
34443 --- a/drivers/net/ethernet/realtek/r8169.c
34444 +++ b/drivers/net/ethernet/realtek/r8169.c
34445 @@ -698,17 +698,17 @@ struct rtl8169_private {
34446 struct mdio_ops {
34447 void (*write)(void __iomem *, int, int);
34448 int (*read)(void __iomem *, int);
34449 - } mdio_ops;
34450 + } __no_const mdio_ops;
34451
34452 struct pll_power_ops {
34453 void (*down)(struct rtl8169_private *);
34454 void (*up)(struct rtl8169_private *);
34455 - } pll_power_ops;
34456 + } __no_const pll_power_ops;
34457
34458 struct jumbo_ops {
34459 void (*enable)(struct rtl8169_private *);
34460 void (*disable)(struct rtl8169_private *);
34461 - } jumbo_ops;
34462 + } __no_const jumbo_ops;
34463
34464 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34465 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34466 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
34467 index 1b4658c..a30dabb 100644
34468 --- a/drivers/net/ethernet/sis/sis190.c
34469 +++ b/drivers/net/ethernet/sis/sis190.c
34470 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34471 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34472 struct net_device *dev)
34473 {
34474 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34475 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34476 struct sis190_private *tp = netdev_priv(dev);
34477 struct pci_dev *isa_bridge;
34478 u8 reg, tmp8;
34479 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34480 index 41e6b33..8e89b0f 100644
34481 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34482 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
34483 @@ -139,8 +139,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
34484
34485 writel(value, ioaddr + MMC_CNTRL);
34486
34487 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34488 - MMC_CNTRL, value);
34489 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
34490 +// MMC_CNTRL, value);
34491 }
34492
34493 /* To mask all all interrupts.*/
34494 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
34495 index edfa15d..002bfa9 100644
34496 --- a/drivers/net/ppp/ppp_generic.c
34497 +++ b/drivers/net/ppp/ppp_generic.c
34498 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34499 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34500 struct ppp_stats stats;
34501 struct ppp_comp_stats cstats;
34502 - char *vers;
34503
34504 switch (cmd) {
34505 case SIOCGPPPSTATS:
34506 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34507 break;
34508
34509 case SIOCGPPPVER:
34510 - vers = PPP_VERSION;
34511 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34512 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34513 break;
34514 err = 0;
34515 break;
34516 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34517 index 515f122..41dd273 100644
34518 --- a/drivers/net/tokenring/abyss.c
34519 +++ b/drivers/net/tokenring/abyss.c
34520 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
34521
34522 static int __init abyss_init (void)
34523 {
34524 - abyss_netdev_ops = tms380tr_netdev_ops;
34525 + pax_open_kernel();
34526 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34527
34528 - abyss_netdev_ops.ndo_open = abyss_open;
34529 - abyss_netdev_ops.ndo_stop = abyss_close;
34530 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34531 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34532 + pax_close_kernel();
34533
34534 return pci_register_driver(&abyss_driver);
34535 }
34536 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34537 index 6153cfd..cf69c1c 100644
34538 --- a/drivers/net/tokenring/madgemc.c
34539 +++ b/drivers/net/tokenring/madgemc.c
34540 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
34541
34542 static int __init madgemc_init (void)
34543 {
34544 - madgemc_netdev_ops = tms380tr_netdev_ops;
34545 - madgemc_netdev_ops.ndo_open = madgemc_open;
34546 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34547 + pax_open_kernel();
34548 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34549 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34550 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34551 + pax_close_kernel();
34552
34553 return mca_register_driver (&madgemc_driver);
34554 }
34555 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34556 index 8d362e6..f91cc52 100644
34557 --- a/drivers/net/tokenring/proteon.c
34558 +++ b/drivers/net/tokenring/proteon.c
34559 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34560 struct platform_device *pdev;
34561 int i, num = 0, err = 0;
34562
34563 - proteon_netdev_ops = tms380tr_netdev_ops;
34564 - proteon_netdev_ops.ndo_open = proteon_open;
34565 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34566 + pax_open_kernel();
34567 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34568 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34569 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34570 + pax_close_kernel();
34571
34572 err = platform_driver_register(&proteon_driver);
34573 if (err)
34574 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34575 index 46db5c5..37c1536 100644
34576 --- a/drivers/net/tokenring/skisa.c
34577 +++ b/drivers/net/tokenring/skisa.c
34578 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34579 struct platform_device *pdev;
34580 int i, num = 0, err = 0;
34581
34582 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34583 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34584 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34585 + pax_open_kernel();
34586 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34587 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34588 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34589 + pax_close_kernel();
34590
34591 err = platform_driver_register(&sk_isa_driver);
34592 if (err)
34593 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34594 index 304fe78..db112fa 100644
34595 --- a/drivers/net/usb/hso.c
34596 +++ b/drivers/net/usb/hso.c
34597 @@ -71,7 +71,7 @@
34598 #include <asm/byteorder.h>
34599 #include <linux/serial_core.h>
34600 #include <linux/serial.h>
34601 -
34602 +#include <asm/local.h>
34603
34604 #define MOD_AUTHOR "Option Wireless"
34605 #define MOD_DESCRIPTION "USB High Speed Option driver"
34606 @@ -257,7 +257,7 @@ struct hso_serial {
34607
34608 /* from usb_serial_port */
34609 struct tty_struct *tty;
34610 - int open_count;
34611 + local_t open_count;
34612 spinlock_t serial_lock;
34613
34614 int (*write_data) (struct hso_serial *serial);
34615 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34616 struct urb *urb;
34617
34618 urb = serial->rx_urb[0];
34619 - if (serial->open_count > 0) {
34620 + if (local_read(&serial->open_count) > 0) {
34621 count = put_rxbuf_data(urb, serial);
34622 if (count == -1)
34623 return;
34624 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34625 DUMP1(urb->transfer_buffer, urb->actual_length);
34626
34627 /* Anyone listening? */
34628 - if (serial->open_count == 0)
34629 + if (local_read(&serial->open_count) == 0)
34630 return;
34631
34632 if (status == 0) {
34633 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34634 spin_unlock_irq(&serial->serial_lock);
34635
34636 /* check for port already opened, if not set the termios */
34637 - serial->open_count++;
34638 - if (serial->open_count == 1) {
34639 + if (local_inc_return(&serial->open_count) == 1) {
34640 serial->rx_state = RX_IDLE;
34641 /* Force default termio settings */
34642 _hso_serial_set_termios(tty, NULL);
34643 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34644 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34645 if (result) {
34646 hso_stop_serial_device(serial->parent);
34647 - serial->open_count--;
34648 + local_dec(&serial->open_count);
34649 kref_put(&serial->parent->ref, hso_serial_ref_free);
34650 }
34651 } else {
34652 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34653
34654 /* reset the rts and dtr */
34655 /* do the actual close */
34656 - serial->open_count--;
34657 + local_dec(&serial->open_count);
34658
34659 - if (serial->open_count <= 0) {
34660 - serial->open_count = 0;
34661 + if (local_read(&serial->open_count) <= 0) {
34662 + local_set(&serial->open_count, 0);
34663 spin_lock_irq(&serial->serial_lock);
34664 if (serial->tty == tty) {
34665 serial->tty->driver_data = NULL;
34666 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34667
34668 /* the actual setup */
34669 spin_lock_irqsave(&serial->serial_lock, flags);
34670 - if (serial->open_count)
34671 + if (local_read(&serial->open_count))
34672 _hso_serial_set_termios(tty, old);
34673 else
34674 tty->termios = old;
34675 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34676 D1("Pending read interrupt on port %d\n", i);
34677 spin_lock(&serial->serial_lock);
34678 if (serial->rx_state == RX_IDLE &&
34679 - serial->open_count > 0) {
34680 + local_read(&serial->open_count) > 0) {
34681 /* Setup and send a ctrl req read on
34682 * port i */
34683 if (!serial->rx_urb_filled[0]) {
34684 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34685 /* Start all serial ports */
34686 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34687 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34688 - if (dev2ser(serial_table[i])->open_count) {
34689 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34690 result =
34691 hso_start_serial_device(serial_table[i], GFP_NOIO);
34692 hso_kick_transmit(dev2ser(serial_table[i]));
34693 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34694 index e662cbc..8d4a102 100644
34695 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
34696 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34697 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
34698 * Return with error code if any of the queue indices
34699 * is out of range
34700 */
34701 - if (p->ring_index[i] < 0 ||
34702 - p->ring_index[i] >= adapter->num_rx_queues)
34703 + if (p->ring_index[i] >= adapter->num_rx_queues)
34704 return -EINVAL;
34705 }
34706
34707 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34708 index 0f9ee46..e2d6e65 100644
34709 --- a/drivers/net/wireless/ath/ath.h
34710 +++ b/drivers/net/wireless/ath/ath.h
34711 @@ -119,6 +119,7 @@ struct ath_ops {
34712 void (*write_flush) (void *);
34713 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34714 };
34715 +typedef struct ath_ops __no_const ath_ops_no_const;
34716
34717 struct ath_common;
34718 struct ath_bus_ops;
34719 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34720 index b592016..fe47870 100644
34721 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34722 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
34723 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34724 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
34725 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
34726
34727 - ACCESS_ONCE(ads->ds_link) = i->link;
34728 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
34729 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
34730 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
34731
34732 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
34733 ctl6 = SM(i->keytype, AR_EncrType);
34734 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34735
34736 if ((i->is_first || i->is_last) &&
34737 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
34738 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
34739 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
34740 | set11nTries(i->rates, 1)
34741 | set11nTries(i->rates, 2)
34742 | set11nTries(i->rates, 3)
34743 | (i->dur_update ? AR_DurUpdateEna : 0)
34744 | SM(0, AR_BurstDur);
34745
34746 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
34747 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
34748 | set11nRate(i->rates, 1)
34749 | set11nRate(i->rates, 2)
34750 | set11nRate(i->rates, 3);
34751 } else {
34752 - ACCESS_ONCE(ads->ds_ctl2) = 0;
34753 - ACCESS_ONCE(ads->ds_ctl3) = 0;
34754 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
34755 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
34756 }
34757
34758 if (!i->is_first) {
34759 - ACCESS_ONCE(ads->ds_ctl0) = 0;
34760 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34761 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34762 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
34763 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34764 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34765 return;
34766 }
34767
34768 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34769 break;
34770 }
34771
34772 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34773 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
34774 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34775 | SM(i->txpower, AR_XmitPower)
34776 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34777 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34778 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
34779 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
34780
34781 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
34782 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
34783 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
34784 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
34785
34786 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
34787 return;
34788
34789 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34790 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
34791 | set11nPktDurRTSCTS(i->rates, 1);
34792
34793 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34794 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
34795 | set11nPktDurRTSCTS(i->rates, 3);
34796
34797 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34798 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
34799 | set11nRateFlags(i->rates, 1)
34800 | set11nRateFlags(i->rates, 2)
34801 | set11nRateFlags(i->rates, 3)
34802 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34803 index f5ae3c6..7936af3 100644
34804 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34805 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
34806 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34807 (i->qcu << AR_TxQcuNum_S) | 0x17;
34808
34809 checksum += val;
34810 - ACCESS_ONCE(ads->info) = val;
34811 + ACCESS_ONCE_RW(ads->info) = val;
34812
34813 checksum += i->link;
34814 - ACCESS_ONCE(ads->link) = i->link;
34815 + ACCESS_ONCE_RW(ads->link) = i->link;
34816
34817 checksum += i->buf_addr[0];
34818 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
34819 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
34820 checksum += i->buf_addr[1];
34821 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
34822 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
34823 checksum += i->buf_addr[2];
34824 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
34825 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
34826 checksum += i->buf_addr[3];
34827 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
34828 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
34829
34830 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
34831 - ACCESS_ONCE(ads->ctl3) = val;
34832 + ACCESS_ONCE_RW(ads->ctl3) = val;
34833 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
34834 - ACCESS_ONCE(ads->ctl5) = val;
34835 + ACCESS_ONCE_RW(ads->ctl5) = val;
34836 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
34837 - ACCESS_ONCE(ads->ctl7) = val;
34838 + ACCESS_ONCE_RW(ads->ctl7) = val;
34839 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
34840 - ACCESS_ONCE(ads->ctl9) = val;
34841 + ACCESS_ONCE_RW(ads->ctl9) = val;
34842
34843 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
34844 - ACCESS_ONCE(ads->ctl10) = checksum;
34845 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
34846
34847 if (i->is_first || i->is_last) {
34848 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
34849 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
34850 | set11nTries(i->rates, 1)
34851 | set11nTries(i->rates, 2)
34852 | set11nTries(i->rates, 3)
34853 | (i->dur_update ? AR_DurUpdateEna : 0)
34854 | SM(0, AR_BurstDur);
34855
34856 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
34857 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
34858 | set11nRate(i->rates, 1)
34859 | set11nRate(i->rates, 2)
34860 | set11nRate(i->rates, 3);
34861 } else {
34862 - ACCESS_ONCE(ads->ctl13) = 0;
34863 - ACCESS_ONCE(ads->ctl14) = 0;
34864 + ACCESS_ONCE_RW(ads->ctl13) = 0;
34865 + ACCESS_ONCE_RW(ads->ctl14) = 0;
34866 }
34867
34868 ads->ctl20 = 0;
34869 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34870
34871 ctl17 = SM(i->keytype, AR_EncrType);
34872 if (!i->is_first) {
34873 - ACCESS_ONCE(ads->ctl11) = 0;
34874 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34875 - ACCESS_ONCE(ads->ctl15) = 0;
34876 - ACCESS_ONCE(ads->ctl16) = 0;
34877 - ACCESS_ONCE(ads->ctl17) = ctl17;
34878 - ACCESS_ONCE(ads->ctl18) = 0;
34879 - ACCESS_ONCE(ads->ctl19) = 0;
34880 + ACCESS_ONCE_RW(ads->ctl11) = 0;
34881 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
34882 + ACCESS_ONCE_RW(ads->ctl15) = 0;
34883 + ACCESS_ONCE_RW(ads->ctl16) = 0;
34884 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34885 + ACCESS_ONCE_RW(ads->ctl18) = 0;
34886 + ACCESS_ONCE_RW(ads->ctl19) = 0;
34887 return;
34888 }
34889
34890 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34891 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
34892 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
34893 | SM(i->txpower, AR_XmitPower)
34894 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
34895 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
34896 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
34897 ctl12 |= SM(val, AR_PAPRDChainMask);
34898
34899 - ACCESS_ONCE(ads->ctl12) = ctl12;
34900 - ACCESS_ONCE(ads->ctl17) = ctl17;
34901 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
34902 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
34903
34904 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34905 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
34906 | set11nPktDurRTSCTS(i->rates, 1);
34907
34908 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34909 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
34910 | set11nPktDurRTSCTS(i->rates, 3);
34911
34912 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
34913 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
34914 | set11nRateFlags(i->rates, 1)
34915 | set11nRateFlags(i->rates, 2)
34916 | set11nRateFlags(i->rates, 3)
34917 | SM(i->rtscts_rate, AR_RTSCTSRate);
34918
34919 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
34920 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
34921 }
34922
34923 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
34924 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34925 index 1bd8edf..10c6d30 100644
34926 --- a/drivers/net/wireless/ath/ath9k/hw.h
34927 +++ b/drivers/net/wireless/ath/ath9k/hw.h
34928 @@ -605,7 +605,7 @@ struct ath_hw_private_ops {
34929
34930 /* ANI */
34931 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34932 -};
34933 +} __no_const;
34934
34935 /**
34936 * struct ath_hw_ops - callbacks used by hardware code and driver code
34937 @@ -635,7 +635,7 @@ struct ath_hw_ops {
34938 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34939 struct ath_hw_antcomb_conf *antconf);
34940
34941 -};
34942 +} __no_const;
34943
34944 struct ath_nf_limits {
34945 s16 max;
34946 @@ -655,7 +655,7 @@ enum ath_cal_list {
34947 #define AH_FASTCC 0x4
34948
34949 struct ath_hw {
34950 - struct ath_ops reg_ops;
34951 + ath_ops_no_const reg_ops;
34952
34953 struct ieee80211_hw *hw;
34954 struct ath_common common;
34955 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34956 index bea8524..c677c06 100644
34957 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34958 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
34959 @@ -547,7 +547,7 @@ struct phy_func_ptr {
34960 void (*carrsuppr)(struct brcms_phy *);
34961 s32 (*rxsigpwr)(struct brcms_phy *, s32);
34962 void (*detach)(struct brcms_phy *);
34963 -};
34964 +} __no_const;
34965
34966 struct brcms_phy {
34967 struct brcms_phy_pub pubpi_ro;
34968 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34969 index 05f2ad1..ae00eea 100644
34970 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
34971 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34972 @@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
34973 */
34974 if (iwl3945_mod_params.disable_hw_scan) {
34975 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
34976 - iwl3945_hw_ops.hw_scan = NULL;
34977 + pax_open_kernel();
34978 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
34979 + pax_close_kernel();
34980 }
34981
34982 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
34983 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34984 index 69a77e2..552b42c 100644
34985 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34986 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34987 @@ -71,8 +71,8 @@ do { \
34988 } while (0)
34989
34990 #else
34991 -#define IWL_DEBUG(m, level, fmt, args...)
34992 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
34993 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
34994 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
34995 #define iwl_print_hex_dump(m, level, p, len)
34996 #endif /* CONFIG_IWLWIFI_DEBUG */
34997
34998 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34999 index 523ad55..f8c5dc5 100644
35000 --- a/drivers/net/wireless/mac80211_hwsim.c
35001 +++ b/drivers/net/wireless/mac80211_hwsim.c
35002 @@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
35003 return -EINVAL;
35004
35005 if (fake_hw_scan) {
35006 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35007 - mac80211_hwsim_ops.sw_scan_start = NULL;
35008 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35009 + pax_open_kernel();
35010 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35011 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35012 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35013 + pax_close_kernel();
35014 }
35015
35016 spin_lock_init(&hwsim_radio_lock);
35017 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35018 index 30f138b..c904585 100644
35019 --- a/drivers/net/wireless/mwifiex/main.h
35020 +++ b/drivers/net/wireless/mwifiex/main.h
35021 @@ -543,7 +543,7 @@ struct mwifiex_if_ops {
35022 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35023 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35024 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35025 -};
35026 +} __no_const;
35027
35028 struct mwifiex_adapter {
35029 u8 iface_type;
35030 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35031 index 0c13840..a5c3ed6 100644
35032 --- a/drivers/net/wireless/rndis_wlan.c
35033 +++ b/drivers/net/wireless/rndis_wlan.c
35034 @@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35035
35036 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35037
35038 - if (rts_threshold < 0 || rts_threshold > 2347)
35039 + if (rts_threshold > 2347)
35040 rts_threshold = 2347;
35041
35042 tmp = cpu_to_le32(rts_threshold);
35043 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35044 index a77f1bb..c608b2b 100644
35045 --- a/drivers/net/wireless/wl1251/wl1251.h
35046 +++ b/drivers/net/wireless/wl1251/wl1251.h
35047 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35048 void (*reset)(struct wl1251 *wl);
35049 void (*enable_irq)(struct wl1251 *wl);
35050 void (*disable_irq)(struct wl1251 *wl);
35051 -};
35052 +} __no_const;
35053
35054 struct wl1251 {
35055 struct ieee80211_hw *hw;
35056 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35057 index f34b5b2..b5abb9f 100644
35058 --- a/drivers/oprofile/buffer_sync.c
35059 +++ b/drivers/oprofile/buffer_sync.c
35060 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35061 if (cookie == NO_COOKIE)
35062 offset = pc;
35063 if (cookie == INVALID_COOKIE) {
35064 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35065 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35066 offset = pc;
35067 }
35068 if (cookie != last_cookie) {
35069 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35070 /* add userspace sample */
35071
35072 if (!mm) {
35073 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35074 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35075 return 0;
35076 }
35077
35078 cookie = lookup_dcookie(mm, s->eip, &offset);
35079
35080 if (cookie == INVALID_COOKIE) {
35081 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35082 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35083 return 0;
35084 }
35085
35086 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35087 /* ignore backtraces if failed to add a sample */
35088 if (state == sb_bt_start) {
35089 state = sb_bt_ignore;
35090 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35091 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35092 }
35093 }
35094 release_mm(mm);
35095 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35096 index c0cc4e7..44d4e54 100644
35097 --- a/drivers/oprofile/event_buffer.c
35098 +++ b/drivers/oprofile/event_buffer.c
35099 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35100 }
35101
35102 if (buffer_pos == buffer_size) {
35103 - atomic_inc(&oprofile_stats.event_lost_overflow);
35104 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35105 return;
35106 }
35107
35108 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35109 index f8c752e..28bf4fc 100644
35110 --- a/drivers/oprofile/oprof.c
35111 +++ b/drivers/oprofile/oprof.c
35112 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35113 if (oprofile_ops.switch_events())
35114 return;
35115
35116 - atomic_inc(&oprofile_stats.multiplex_counter);
35117 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35118 start_switch_worker();
35119 }
35120
35121 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35122 index 917d28e..d62d981 100644
35123 --- a/drivers/oprofile/oprofile_stats.c
35124 +++ b/drivers/oprofile/oprofile_stats.c
35125 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35126 cpu_buf->sample_invalid_eip = 0;
35127 }
35128
35129 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35130 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35131 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35132 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35133 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35134 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35135 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35136 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35137 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35138 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35139 }
35140
35141
35142 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35143 index 38b6fc0..b5cbfce 100644
35144 --- a/drivers/oprofile/oprofile_stats.h
35145 +++ b/drivers/oprofile/oprofile_stats.h
35146 @@ -13,11 +13,11 @@
35147 #include <linux/atomic.h>
35148
35149 struct oprofile_stat_struct {
35150 - atomic_t sample_lost_no_mm;
35151 - atomic_t sample_lost_no_mapping;
35152 - atomic_t bt_lost_no_mapping;
35153 - atomic_t event_lost_overflow;
35154 - atomic_t multiplex_counter;
35155 + atomic_unchecked_t sample_lost_no_mm;
35156 + atomic_unchecked_t sample_lost_no_mapping;
35157 + atomic_unchecked_t bt_lost_no_mapping;
35158 + atomic_unchecked_t event_lost_overflow;
35159 + atomic_unchecked_t multiplex_counter;
35160 };
35161
35162 extern struct oprofile_stat_struct oprofile_stats;
35163 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35164 index 2f0aa0f..90fab02 100644
35165 --- a/drivers/oprofile/oprofilefs.c
35166 +++ b/drivers/oprofile/oprofilefs.c
35167 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
35168
35169
35170 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35171 - char const *name, atomic_t *val)
35172 + char const *name, atomic_unchecked_t *val)
35173 {
35174 return __oprofilefs_create_file(sb, root, name,
35175 &atomic_ro_fops, 0444, val);
35176 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35177 index 3f56bc0..707d642 100644
35178 --- a/drivers/parport/procfs.c
35179 +++ b/drivers/parport/procfs.c
35180 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35181
35182 *ppos += len;
35183
35184 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35185 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35186 }
35187
35188 #ifdef CONFIG_PARPORT_1284
35189 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35190
35191 *ppos += len;
35192
35193 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35194 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35195 }
35196 #endif /* IEEE1284.3 support. */
35197
35198 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35199 index 9fff878..ad0ad53 100644
35200 --- a/drivers/pci/hotplug/cpci_hotplug.h
35201 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35202 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35203 int (*hardware_test) (struct slot* slot, u32 value);
35204 u8 (*get_power) (struct slot* slot);
35205 int (*set_power) (struct slot* slot, int value);
35206 -};
35207 +} __no_const;
35208
35209 struct cpci_hp_controller {
35210 unsigned int irq;
35211 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35212 index 76ba8a1..20ca857 100644
35213 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35214 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35215 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35216
35217 void compaq_nvram_init (void __iomem *rom_start)
35218 {
35219 +
35220 +#ifndef CONFIG_PAX_KERNEXEC
35221 if (rom_start) {
35222 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35223 }
35224 +#endif
35225 +
35226 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35227
35228 /* initialize our int15 lock */
35229 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35230 index 1cfbf22..be96487 100644
35231 --- a/drivers/pci/pcie/aspm.c
35232 +++ b/drivers/pci/pcie/aspm.c
35233 @@ -27,9 +27,9 @@
35234 #define MODULE_PARAM_PREFIX "pcie_aspm."
35235
35236 /* Note: those are not register definitions */
35237 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35238 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35239 -#define ASPM_STATE_L1 (4) /* L1 state */
35240 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35241 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35242 +#define ASPM_STATE_L1 (4U) /* L1 state */
35243 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35244 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35245
35246 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35247 index dfee1b3..a454fb6 100644
35248 --- a/drivers/pci/probe.c
35249 +++ b/drivers/pci/probe.c
35250 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35251 u32 l, sz, mask;
35252 u16 orig_cmd;
35253
35254 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35255 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35256
35257 if (!dev->mmio_always_on) {
35258 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35259 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35260 index 27911b5..5b6db88 100644
35261 --- a/drivers/pci/proc.c
35262 +++ b/drivers/pci/proc.c
35263 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35264 static int __init pci_proc_init(void)
35265 {
35266 struct pci_dev *dev = NULL;
35267 +
35268 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35269 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35270 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35271 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35272 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35273 +#endif
35274 +#else
35275 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35276 +#endif
35277 proc_create("devices", 0, proc_bus_pci_dir,
35278 &proc_bus_pci_dev_operations);
35279 proc_initialized = 1;
35280 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35281 index 7b82868..b9344c9 100644
35282 --- a/drivers/platform/x86/thinkpad_acpi.c
35283 +++ b/drivers/platform/x86/thinkpad_acpi.c
35284 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35285 return 0;
35286 }
35287
35288 -void static hotkey_mask_warn_incomplete_mask(void)
35289 +static void hotkey_mask_warn_incomplete_mask(void)
35290 {
35291 /* log only what the user can fix... */
35292 const u32 wantedmask = hotkey_driver_mask &
35293 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35294 }
35295 }
35296
35297 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35298 - struct tp_nvram_state *newn,
35299 - const u32 event_mask)
35300 -{
35301 -
35302 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35303 do { \
35304 if ((event_mask & (1 << __scancode)) && \
35305 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35306 tpacpi_hotkey_send_key(__scancode); \
35307 } while (0)
35308
35309 - void issue_volchange(const unsigned int oldvol,
35310 - const unsigned int newvol)
35311 - {
35312 - unsigned int i = oldvol;
35313 +static void issue_volchange(const unsigned int oldvol,
35314 + const unsigned int newvol,
35315 + const u32 event_mask)
35316 +{
35317 + unsigned int i = oldvol;
35318
35319 - while (i > newvol) {
35320 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35321 - i--;
35322 - }
35323 - while (i < newvol) {
35324 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35325 - i++;
35326 - }
35327 + while (i > newvol) {
35328 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35329 + i--;
35330 }
35331 + while (i < newvol) {
35332 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35333 + i++;
35334 + }
35335 +}
35336
35337 - void issue_brightnesschange(const unsigned int oldbrt,
35338 - const unsigned int newbrt)
35339 - {
35340 - unsigned int i = oldbrt;
35341 +static void issue_brightnesschange(const unsigned int oldbrt,
35342 + const unsigned int newbrt,
35343 + const u32 event_mask)
35344 +{
35345 + unsigned int i = oldbrt;
35346
35347 - while (i > newbrt) {
35348 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35349 - i--;
35350 - }
35351 - while (i < newbrt) {
35352 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35353 - i++;
35354 - }
35355 + while (i > newbrt) {
35356 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35357 + i--;
35358 + }
35359 + while (i < newbrt) {
35360 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35361 + i++;
35362 }
35363 +}
35364
35365 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35366 + struct tp_nvram_state *newn,
35367 + const u32 event_mask)
35368 +{
35369 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35370 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35371 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35372 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35373 oldn->volume_level != newn->volume_level) {
35374 /* recently muted, or repeated mute keypress, or
35375 * multiple presses ending in mute */
35376 - issue_volchange(oldn->volume_level, newn->volume_level);
35377 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35378 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35379 }
35380 } else {
35381 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35382 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35383 }
35384 if (oldn->volume_level != newn->volume_level) {
35385 - issue_volchange(oldn->volume_level, newn->volume_level);
35386 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35387 } else if (oldn->volume_toggle != newn->volume_toggle) {
35388 /* repeated vol up/down keypress at end of scale ? */
35389 if (newn->volume_level == 0)
35390 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35391 /* handle brightness */
35392 if (oldn->brightness_level != newn->brightness_level) {
35393 issue_brightnesschange(oldn->brightness_level,
35394 - newn->brightness_level);
35395 + newn->brightness_level,
35396 + event_mask);
35397 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35398 /* repeated key presses that didn't change state */
35399 if (newn->brightness_level == 0)
35400 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35401 && !tp_features.bright_unkfw)
35402 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35403 }
35404 +}
35405
35406 #undef TPACPI_COMPARE_KEY
35407 #undef TPACPI_MAY_SEND_KEY
35408 -}
35409
35410 /*
35411 * Polling driver
35412 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35413 index b859d16..5cc6b1a 100644
35414 --- a/drivers/pnp/pnpbios/bioscalls.c
35415 +++ b/drivers/pnp/pnpbios/bioscalls.c
35416 @@ -59,7 +59,7 @@ do { \
35417 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35418 } while(0)
35419
35420 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35421 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35422 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35423
35424 /*
35425 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35426
35427 cpu = get_cpu();
35428 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35429 +
35430 + pax_open_kernel();
35431 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35432 + pax_close_kernel();
35433
35434 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35435 spin_lock_irqsave(&pnp_bios_lock, flags);
35436 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35437 :"memory");
35438 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35439
35440 + pax_open_kernel();
35441 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35442 + pax_close_kernel();
35443 +
35444 put_cpu();
35445
35446 /* If we get here and this is set then the PnP BIOS faulted on us. */
35447 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35448 return status;
35449 }
35450
35451 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35452 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35453 {
35454 int i;
35455
35456 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35457 pnp_bios_callpoint.offset = header->fields.pm16offset;
35458 pnp_bios_callpoint.segment = PNP_CS16;
35459
35460 + pax_open_kernel();
35461 +
35462 for_each_possible_cpu(i) {
35463 struct desc_struct *gdt = get_cpu_gdt_table(i);
35464 if (!gdt)
35465 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35466 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35467 (unsigned long)__va(header->fields.pm16dseg));
35468 }
35469 +
35470 + pax_close_kernel();
35471 }
35472 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35473 index b0ecacb..7c9da2e 100644
35474 --- a/drivers/pnp/resource.c
35475 +++ b/drivers/pnp/resource.c
35476 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35477 return 1;
35478
35479 /* check if the resource is valid */
35480 - if (*irq < 0 || *irq > 15)
35481 + if (*irq > 15)
35482 return 0;
35483
35484 /* check if the resource is reserved */
35485 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35486 return 1;
35487
35488 /* check if the resource is valid */
35489 - if (*dma < 0 || *dma == 4 || *dma > 7)
35490 + if (*dma == 4 || *dma > 7)
35491 return 0;
35492
35493 /* check if the resource is reserved */
35494 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35495 index bb16f5b..c751eef 100644
35496 --- a/drivers/power/bq27x00_battery.c
35497 +++ b/drivers/power/bq27x00_battery.c
35498 @@ -67,7 +67,7 @@
35499 struct bq27x00_device_info;
35500 struct bq27x00_access_methods {
35501 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35502 -};
35503 +} __no_const;
35504
35505 enum bq27x00_chip { BQ27000, BQ27500 };
35506
35507 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35508 index 33f5d9a..d957d3f 100644
35509 --- a/drivers/regulator/max8660.c
35510 +++ b/drivers/regulator/max8660.c
35511 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35512 max8660->shadow_regs[MAX8660_OVER1] = 5;
35513 } else {
35514 /* Otherwise devices can be toggled via software */
35515 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35516 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35517 + pax_open_kernel();
35518 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35519 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35520 + pax_close_kernel();
35521 }
35522
35523 /*
35524 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35525 index 023d17d..74ef35b 100644
35526 --- a/drivers/regulator/mc13892-regulator.c
35527 +++ b/drivers/regulator/mc13892-regulator.c
35528 @@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35529 }
35530 mc13xxx_unlock(mc13892);
35531
35532 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35533 + pax_open_kernel();
35534 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35535 = mc13892_vcam_set_mode;
35536 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35537 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35538 = mc13892_vcam_get_mode;
35539 + pax_close_kernel();
35540 for (i = 0; i < pdata->num_regulators; i++) {
35541 init_data = &pdata->regulators[i];
35542 priv->regulators[i] = regulator_register(
35543 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35544 index cace6d3..f623fda 100644
35545 --- a/drivers/rtc/rtc-dev.c
35546 +++ b/drivers/rtc/rtc-dev.c
35547 @@ -14,6 +14,7 @@
35548 #include <linux/module.h>
35549 #include <linux/rtc.h>
35550 #include <linux/sched.h>
35551 +#include <linux/grsecurity.h>
35552 #include "rtc-core.h"
35553
35554 static dev_t rtc_devt;
35555 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35556 if (copy_from_user(&tm, uarg, sizeof(tm)))
35557 return -EFAULT;
35558
35559 + gr_log_timechange();
35560 +
35561 return rtc_set_time(rtc, &tm);
35562
35563 case RTC_PIE_ON:
35564 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35565 index ffb5878..e6d785c 100644
35566 --- a/drivers/scsi/aacraid/aacraid.h
35567 +++ b/drivers/scsi/aacraid/aacraid.h
35568 @@ -492,7 +492,7 @@ struct adapter_ops
35569 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35570 /* Administrative operations */
35571 int (*adapter_comm)(struct aac_dev * dev, int comm);
35572 -};
35573 +} __no_const;
35574
35575 /*
35576 * Define which interrupt handler needs to be installed
35577 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35578 index 705e13e..91c873c 100644
35579 --- a/drivers/scsi/aacraid/linit.c
35580 +++ b/drivers/scsi/aacraid/linit.c
35581 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35582 #elif defined(__devinitconst)
35583 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35584 #else
35585 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35586 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35587 #endif
35588 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35589 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35590 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35591 index d5ff142..49c0ebb 100644
35592 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35593 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35594 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35595 .lldd_control_phy = asd_control_phy,
35596 };
35597
35598 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35599 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35600 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35601 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35602 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35603 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35604 index a796de9..1ef20e1 100644
35605 --- a/drivers/scsi/bfa/bfa.h
35606 +++ b/drivers/scsi/bfa/bfa.h
35607 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35608 u32 *end);
35609 int cpe_vec_q0;
35610 int rme_vec_q0;
35611 -};
35612 +} __no_const;
35613 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35614
35615 struct bfa_faa_cbfn_s {
35616 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35617 index e07bd47..cd1bbbb 100644
35618 --- a/drivers/scsi/bfa/bfa_fcpim.c
35619 +++ b/drivers/scsi/bfa/bfa_fcpim.c
35620 @@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
35621
35622 bfa_iotag_attach(fcp);
35623
35624 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
35625 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
35626 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
35627 (fcp->num_itns * sizeof(struct bfa_itn_s));
35628 memset(fcp->itn_arr, 0,
35629 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35630 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35631 {
35632 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35633 - struct bfa_itn_s *itn;
35634 + bfa_itn_s_no_const *itn;
35635
35636 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35637 itn->isr = isr;
35638 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35639 index 1080bcb..a3b39e3 100644
35640 --- a/drivers/scsi/bfa/bfa_fcpim.h
35641 +++ b/drivers/scsi/bfa/bfa_fcpim.h
35642 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
35643 struct bfa_itn_s {
35644 bfa_isr_func_t isr;
35645 };
35646 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35647
35648 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35649 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35650 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
35651 struct list_head iotag_tio_free_q; /* free IO resources */
35652 struct list_head iotag_unused_q; /* unused IO resources*/
35653 struct bfa_iotag_s *iotag_arr;
35654 - struct bfa_itn_s *itn_arr;
35655 + bfa_itn_s_no_const *itn_arr;
35656 int num_ioim_reqs;
35657 int num_fwtio_reqs;
35658 int num_itns;
35659 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35660 index 546d46b..642fa5b 100644
35661 --- a/drivers/scsi/bfa/bfa_ioc.h
35662 +++ b/drivers/scsi/bfa/bfa_ioc.h
35663 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35664 bfa_ioc_disable_cbfn_t disable_cbfn;
35665 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35666 bfa_ioc_reset_cbfn_t reset_cbfn;
35667 -};
35668 +} __no_const;
35669
35670 /*
35671 * IOC event notification mechanism.
35672 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35673 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35674 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35675 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35676 -};
35677 +} __no_const;
35678
35679 /*
35680 * Queue element to wait for room in request queue. FIFO order is
35681 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35682 index 351dc0b..951dc32 100644
35683 --- a/drivers/scsi/hosts.c
35684 +++ b/drivers/scsi/hosts.c
35685 @@ -42,7 +42,7 @@
35686 #include "scsi_logging.h"
35687
35688
35689 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
35690 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35691
35692
35693 static void scsi_host_cls_release(struct device *dev)
35694 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
35695 * subtract one because we increment first then return, but we need to
35696 * know what the next host number was before increment
35697 */
35698 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35699 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35700 shost->dma_channel = 0xff;
35701
35702 /* These three are default values which can be overridden */
35703 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35704 index 865d452..e9b7fa7 100644
35705 --- a/drivers/scsi/hpsa.c
35706 +++ b/drivers/scsi/hpsa.c
35707 @@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
35708 u32 a;
35709
35710 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35711 - return h->access.command_completed(h);
35712 + return h->access->command_completed(h);
35713
35714 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35715 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35716 @@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
35717 while (!list_empty(&h->reqQ)) {
35718 c = list_entry(h->reqQ.next, struct CommandList, list);
35719 /* can't do anything if fifo is full */
35720 - if ((h->access.fifo_full(h))) {
35721 + if ((h->access->fifo_full(h))) {
35722 dev_warn(&h->pdev->dev, "fifo full\n");
35723 break;
35724 }
35725 @@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
35726 h->Qdepth--;
35727
35728 /* Tell the controller execute command */
35729 - h->access.submit_command(h, c);
35730 + h->access->submit_command(h, c);
35731
35732 /* Put job onto the completed Q */
35733 addQ(&h->cmpQ, c);
35734 @@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
35735
35736 static inline unsigned long get_next_completion(struct ctlr_info *h)
35737 {
35738 - return h->access.command_completed(h);
35739 + return h->access->command_completed(h);
35740 }
35741
35742 static inline bool interrupt_pending(struct ctlr_info *h)
35743 {
35744 - return h->access.intr_pending(h);
35745 + return h->access->intr_pending(h);
35746 }
35747
35748 static inline long interrupt_not_for_us(struct ctlr_info *h)
35749 {
35750 - return (h->access.intr_pending(h) == 0) ||
35751 + return (h->access->intr_pending(h) == 0) ||
35752 (h->interrupts_enabled == 0);
35753 }
35754
35755 @@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
35756 if (prod_index < 0)
35757 return -ENODEV;
35758 h->product_name = products[prod_index].product_name;
35759 - h->access = *(products[prod_index].access);
35760 + h->access = products[prod_index].access;
35761
35762 if (hpsa_board_disabled(h->pdev)) {
35763 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35764 @@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
35765
35766 assert_spin_locked(&lockup_detector_lock);
35767 remove_ctlr_from_lockup_detector_list(h);
35768 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35769 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35770 spin_lock_irqsave(&h->lock, flags);
35771 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
35772 spin_unlock_irqrestore(&h->lock, flags);
35773 @@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
35774 }
35775
35776 /* make sure the board interrupts are off */
35777 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35778 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35779
35780 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35781 goto clean2;
35782 @@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
35783 * fake ones to scoop up any residual completions.
35784 */
35785 spin_lock_irqsave(&h->lock, flags);
35786 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35787 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35788 spin_unlock_irqrestore(&h->lock, flags);
35789 free_irq(h->intr[h->intr_mode], h);
35790 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
35791 @@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
35792 dev_info(&h->pdev->dev, "Board READY.\n");
35793 dev_info(&h->pdev->dev,
35794 "Waiting for stale completions to drain.\n");
35795 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35796 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35797 msleep(10000);
35798 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35799 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35800
35801 rc = controller_reset_failed(h->cfgtable);
35802 if (rc)
35803 @@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
35804 }
35805
35806 /* Turn the interrupts on so we can service requests */
35807 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35808 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35809
35810 hpsa_hba_inquiry(h);
35811 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
35812 @@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
35813 * To write all data in the battery backed cache to disks
35814 */
35815 hpsa_flush_cache(h);
35816 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35817 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35818 free_irq(h->intr[h->intr_mode], h);
35819 #ifdef CONFIG_PCI_MSI
35820 if (h->msix_vector)
35821 @@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
35822 return;
35823 }
35824 /* Change the access methods to the performant access methods */
35825 - h->access = SA5_performant_access;
35826 + h->access = &SA5_performant_access;
35827 h->transMethod = CFGTBL_Trans_Performant;
35828 }
35829
35830 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35831 index 91edafb..a9b88ec 100644
35832 --- a/drivers/scsi/hpsa.h
35833 +++ b/drivers/scsi/hpsa.h
35834 @@ -73,7 +73,7 @@ struct ctlr_info {
35835 unsigned int msix_vector;
35836 unsigned int msi_vector;
35837 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35838 - struct access_method access;
35839 + struct access_method *access;
35840
35841 /* queue and queue Info */
35842 struct list_head reqQ;
35843 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35844 index f2df059..a3a9930 100644
35845 --- a/drivers/scsi/ips.h
35846 +++ b/drivers/scsi/ips.h
35847 @@ -1027,7 +1027,7 @@ typedef struct {
35848 int (*intr)(struct ips_ha *);
35849 void (*enableint)(struct ips_ha *);
35850 uint32_t (*statupd)(struct ips_ha *);
35851 -} ips_hw_func_t;
35852 +} __no_const ips_hw_func_t;
35853
35854 typedef struct ips_ha {
35855 uint8_t ha_id[IPS_MAX_CHANNELS+1];
35856 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
35857 index 9de9db2..1e09660 100644
35858 --- a/drivers/scsi/libfc/fc_exch.c
35859 +++ b/drivers/scsi/libfc/fc_exch.c
35860 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
35861 * all together if not used XXX
35862 */
35863 struct {
35864 - atomic_t no_free_exch;
35865 - atomic_t no_free_exch_xid;
35866 - atomic_t xid_not_found;
35867 - atomic_t xid_busy;
35868 - atomic_t seq_not_found;
35869 - atomic_t non_bls_resp;
35870 + atomic_unchecked_t no_free_exch;
35871 + atomic_unchecked_t no_free_exch_xid;
35872 + atomic_unchecked_t xid_not_found;
35873 + atomic_unchecked_t xid_busy;
35874 + atomic_unchecked_t seq_not_found;
35875 + atomic_unchecked_t non_bls_resp;
35876 } stats;
35877 };
35878
35879 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
35880 /* allocate memory for exchange */
35881 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
35882 if (!ep) {
35883 - atomic_inc(&mp->stats.no_free_exch);
35884 + atomic_inc_unchecked(&mp->stats.no_free_exch);
35885 goto out;
35886 }
35887 memset(ep, 0, sizeof(*ep));
35888 @@ -780,7 +780,7 @@ out:
35889 return ep;
35890 err:
35891 spin_unlock_bh(&pool->lock);
35892 - atomic_inc(&mp->stats.no_free_exch_xid);
35893 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
35894 mempool_free(ep, mp->ep_pool);
35895 return NULL;
35896 }
35897 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35898 xid = ntohs(fh->fh_ox_id); /* we originated exch */
35899 ep = fc_exch_find(mp, xid);
35900 if (!ep) {
35901 - atomic_inc(&mp->stats.xid_not_found);
35902 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35903 reject = FC_RJT_OX_ID;
35904 goto out;
35905 }
35906 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35907 ep = fc_exch_find(mp, xid);
35908 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
35909 if (ep) {
35910 - atomic_inc(&mp->stats.xid_busy);
35911 + atomic_inc_unchecked(&mp->stats.xid_busy);
35912 reject = FC_RJT_RX_ID;
35913 goto rel;
35914 }
35915 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35916 }
35917 xid = ep->xid; /* get our XID */
35918 } else if (!ep) {
35919 - atomic_inc(&mp->stats.xid_not_found);
35920 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35921 reject = FC_RJT_RX_ID; /* XID not found */
35922 goto out;
35923 }
35924 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35925 } else {
35926 sp = &ep->seq;
35927 if (sp->id != fh->fh_seq_id) {
35928 - atomic_inc(&mp->stats.seq_not_found);
35929 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35930 if (f_ctl & FC_FC_END_SEQ) {
35931 /*
35932 * Update sequence_id based on incoming last
35933 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35934
35935 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
35936 if (!ep) {
35937 - atomic_inc(&mp->stats.xid_not_found);
35938 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35939 goto out;
35940 }
35941 if (ep->esb_stat & ESB_ST_COMPLETE) {
35942 - atomic_inc(&mp->stats.xid_not_found);
35943 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35944 goto rel;
35945 }
35946 if (ep->rxid == FC_XID_UNKNOWN)
35947 ep->rxid = ntohs(fh->fh_rx_id);
35948 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
35949 - atomic_inc(&mp->stats.xid_not_found);
35950 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35951 goto rel;
35952 }
35953 if (ep->did != ntoh24(fh->fh_s_id) &&
35954 ep->did != FC_FID_FLOGI) {
35955 - atomic_inc(&mp->stats.xid_not_found);
35956 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35957 goto rel;
35958 }
35959 sof = fr_sof(fp);
35960 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35961 sp->ssb_stat |= SSB_ST_RESP;
35962 sp->id = fh->fh_seq_id;
35963 } else if (sp->id != fh->fh_seq_id) {
35964 - atomic_inc(&mp->stats.seq_not_found);
35965 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35966 goto rel;
35967 }
35968
35969 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35970 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
35971
35972 if (!sp)
35973 - atomic_inc(&mp->stats.xid_not_found);
35974 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35975 else
35976 - atomic_inc(&mp->stats.non_bls_resp);
35977 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
35978
35979 fc_frame_free(fp);
35980 }
35981 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
35982 index db9238f..4378ed2 100644
35983 --- a/drivers/scsi/libsas/sas_ata.c
35984 +++ b/drivers/scsi/libsas/sas_ata.c
35985 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
35986 .postreset = ata_std_postreset,
35987 .error_handler = ata_std_error_handler,
35988 .post_internal_cmd = sas_ata_post_internal,
35989 - .qc_defer = ata_std_qc_defer,
35990 + .qc_defer = ata_std_qc_defer,
35991 .qc_prep = ata_noop_qc_prep,
35992 .qc_issue = sas_ata_qc_issue,
35993 .qc_fill_rtf = sas_ata_qc_fill_rtf,
35994 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
35995 index bb4c8e0..f33d849 100644
35996 --- a/drivers/scsi/lpfc/lpfc.h
35997 +++ b/drivers/scsi/lpfc/lpfc.h
35998 @@ -425,7 +425,7 @@ struct lpfc_vport {
35999 struct dentry *debug_nodelist;
36000 struct dentry *vport_debugfs_root;
36001 struct lpfc_debugfs_trc *disc_trc;
36002 - atomic_t disc_trc_cnt;
36003 + atomic_unchecked_t disc_trc_cnt;
36004 #endif
36005 uint8_t stat_data_enabled;
36006 uint8_t stat_data_blocked;
36007 @@ -835,8 +835,8 @@ struct lpfc_hba {
36008 struct timer_list fabric_block_timer;
36009 unsigned long bit_flags;
36010 #define FABRIC_COMANDS_BLOCKED 0
36011 - atomic_t num_rsrc_err;
36012 - atomic_t num_cmd_success;
36013 + atomic_unchecked_t num_rsrc_err;
36014 + atomic_unchecked_t num_cmd_success;
36015 unsigned long last_rsrc_error_time;
36016 unsigned long last_ramp_down_time;
36017 unsigned long last_ramp_up_time;
36018 @@ -866,7 +866,7 @@ struct lpfc_hba {
36019
36020 struct dentry *debug_slow_ring_trc;
36021 struct lpfc_debugfs_trc *slow_ring_trc;
36022 - atomic_t slow_ring_trc_cnt;
36023 + atomic_unchecked_t slow_ring_trc_cnt;
36024 /* iDiag debugfs sub-directory */
36025 struct dentry *idiag_root;
36026 struct dentry *idiag_pci_cfg;
36027 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36028 index 2838259..a07cfb5 100644
36029 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36030 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36031 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36032
36033 #include <linux/debugfs.h>
36034
36035 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36036 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36037 static unsigned long lpfc_debugfs_start_time = 0L;
36038
36039 /* iDiag */
36040 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36041 lpfc_debugfs_enable = 0;
36042
36043 len = 0;
36044 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36045 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36046 (lpfc_debugfs_max_disc_trc - 1);
36047 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36048 dtp = vport->disc_trc + i;
36049 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36050 lpfc_debugfs_enable = 0;
36051
36052 len = 0;
36053 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36054 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36055 (lpfc_debugfs_max_slow_ring_trc - 1);
36056 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36057 dtp = phba->slow_ring_trc + i;
36058 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36059 !vport || !vport->disc_trc)
36060 return;
36061
36062 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36063 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36064 (lpfc_debugfs_max_disc_trc - 1);
36065 dtp = vport->disc_trc + index;
36066 dtp->fmt = fmt;
36067 dtp->data1 = data1;
36068 dtp->data2 = data2;
36069 dtp->data3 = data3;
36070 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36071 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36072 dtp->jif = jiffies;
36073 #endif
36074 return;
36075 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36076 !phba || !phba->slow_ring_trc)
36077 return;
36078
36079 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36080 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36081 (lpfc_debugfs_max_slow_ring_trc - 1);
36082 dtp = phba->slow_ring_trc + index;
36083 dtp->fmt = fmt;
36084 dtp->data1 = data1;
36085 dtp->data2 = data2;
36086 dtp->data3 = data3;
36087 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36088 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36089 dtp->jif = jiffies;
36090 #endif
36091 return;
36092 @@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36093 "slow_ring buffer\n");
36094 goto debug_failed;
36095 }
36096 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36097 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36098 memset(phba->slow_ring_trc, 0,
36099 (sizeof(struct lpfc_debugfs_trc) *
36100 lpfc_debugfs_max_slow_ring_trc));
36101 @@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36102 "buffer\n");
36103 goto debug_failed;
36104 }
36105 - atomic_set(&vport->disc_trc_cnt, 0);
36106 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36107
36108 snprintf(name, sizeof(name), "discovery_trace");
36109 vport->debug_disc_trc =
36110 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36111 index 55bc4fc..a2a109c 100644
36112 --- a/drivers/scsi/lpfc/lpfc_init.c
36113 +++ b/drivers/scsi/lpfc/lpfc_init.c
36114 @@ -10027,8 +10027,10 @@ lpfc_init(void)
36115 printk(LPFC_COPYRIGHT "\n");
36116
36117 if (lpfc_enable_npiv) {
36118 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36119 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36120 + pax_open_kernel();
36121 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36122 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36123 + pax_close_kernel();
36124 }
36125 lpfc_transport_template =
36126 fc_attach_transport(&lpfc_transport_functions);
36127 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36128 index 2e1e54e..1af0a0d 100644
36129 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36130 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36131 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36132 uint32_t evt_posted;
36133
36134 spin_lock_irqsave(&phba->hbalock, flags);
36135 - atomic_inc(&phba->num_rsrc_err);
36136 + atomic_inc_unchecked(&phba->num_rsrc_err);
36137 phba->last_rsrc_error_time = jiffies;
36138
36139 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36140 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36141 unsigned long flags;
36142 struct lpfc_hba *phba = vport->phba;
36143 uint32_t evt_posted;
36144 - atomic_inc(&phba->num_cmd_success);
36145 + atomic_inc_unchecked(&phba->num_cmd_success);
36146
36147 if (vport->cfg_lun_queue_depth <= queue_depth)
36148 return;
36149 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36150 unsigned long num_rsrc_err, num_cmd_success;
36151 int i;
36152
36153 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36154 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36155 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36156 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36157
36158 vports = lpfc_create_vport_work_array(phba);
36159 if (vports != NULL)
36160 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36161 }
36162 }
36163 lpfc_destroy_vport_work_array(phba, vports);
36164 - atomic_set(&phba->num_rsrc_err, 0);
36165 - atomic_set(&phba->num_cmd_success, 0);
36166 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36167 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36168 }
36169
36170 /**
36171 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36172 }
36173 }
36174 lpfc_destroy_vport_work_array(phba, vports);
36175 - atomic_set(&phba->num_rsrc_err, 0);
36176 - atomic_set(&phba->num_cmd_success, 0);
36177 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36178 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36179 }
36180
36181 /**
36182 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36183 index 5163edb..7b142bc 100644
36184 --- a/drivers/scsi/pmcraid.c
36185 +++ b/drivers/scsi/pmcraid.c
36186 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36187 res->scsi_dev = scsi_dev;
36188 scsi_dev->hostdata = res;
36189 res->change_detected = 0;
36190 - atomic_set(&res->read_failures, 0);
36191 - atomic_set(&res->write_failures, 0);
36192 + atomic_set_unchecked(&res->read_failures, 0);
36193 + atomic_set_unchecked(&res->write_failures, 0);
36194 rc = 0;
36195 }
36196 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36197 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36198
36199 /* If this was a SCSI read/write command keep count of errors */
36200 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36201 - atomic_inc(&res->read_failures);
36202 + atomic_inc_unchecked(&res->read_failures);
36203 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36204 - atomic_inc(&res->write_failures);
36205 + atomic_inc_unchecked(&res->write_failures);
36206
36207 if (!RES_IS_GSCSI(res->cfg_entry) &&
36208 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36209 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36210 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36211 * hrrq_id assigned here in queuecommand
36212 */
36213 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36214 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36215 pinstance->num_hrrq;
36216 cmd->cmd_done = pmcraid_io_done;
36217
36218 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36219 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36220 * hrrq_id assigned here in queuecommand
36221 */
36222 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36223 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36224 pinstance->num_hrrq;
36225
36226 if (request_size) {
36227 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36228
36229 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36230 /* add resources only after host is added into system */
36231 - if (!atomic_read(&pinstance->expose_resources))
36232 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36233 return;
36234
36235 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36236 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36237 init_waitqueue_head(&pinstance->reset_wait_q);
36238
36239 atomic_set(&pinstance->outstanding_cmds, 0);
36240 - atomic_set(&pinstance->last_message_id, 0);
36241 - atomic_set(&pinstance->expose_resources, 0);
36242 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36243 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36244
36245 INIT_LIST_HEAD(&pinstance->free_res_q);
36246 INIT_LIST_HEAD(&pinstance->used_res_q);
36247 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36248 /* Schedule worker thread to handle CCN and take care of adding and
36249 * removing devices to OS
36250 */
36251 - atomic_set(&pinstance->expose_resources, 1);
36252 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36253 schedule_work(&pinstance->worker_q);
36254 return rc;
36255
36256 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36257 index ca496c7..9c791d5 100644
36258 --- a/drivers/scsi/pmcraid.h
36259 +++ b/drivers/scsi/pmcraid.h
36260 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36261 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36262
36263 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36264 - atomic_t last_message_id;
36265 + atomic_unchecked_t last_message_id;
36266
36267 /* configuration table */
36268 struct pmcraid_config_table *cfg_table;
36269 @@ -777,7 +777,7 @@ struct pmcraid_instance {
36270 atomic_t outstanding_cmds;
36271
36272 /* should add/delete resources to mid-layer now ?*/
36273 - atomic_t expose_resources;
36274 + atomic_unchecked_t expose_resources;
36275
36276
36277
36278 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
36279 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36280 };
36281 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36282 - atomic_t read_failures; /* count of failed READ commands */
36283 - atomic_t write_failures; /* count of failed WRITE commands */
36284 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36285 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36286
36287 /* To indicate add/delete/modify during CCN */
36288 u8 change_detected;
36289 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36290 index fcf052c..a8025a4 100644
36291 --- a/drivers/scsi/qla2xxx/qla_def.h
36292 +++ b/drivers/scsi/qla2xxx/qla_def.h
36293 @@ -2244,7 +2244,7 @@ struct isp_operations {
36294 int (*get_flash_version) (struct scsi_qla_host *, void *);
36295 int (*start_scsi) (srb_t *);
36296 int (*abort_isp) (struct scsi_qla_host *);
36297 -};
36298 +} __no_const;
36299
36300 /* MSI-X Support *************************************************************/
36301
36302 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36303 index fd5edc6..4906148 100644
36304 --- a/drivers/scsi/qla4xxx/ql4_def.h
36305 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36306 @@ -258,7 +258,7 @@ struct ddb_entry {
36307 * (4000 only) */
36308 atomic_t relogin_timer; /* Max Time to wait for
36309 * relogin to complete */
36310 - atomic_t relogin_retry_count; /* Num of times relogin has been
36311 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36312 * retried */
36313 uint32_t default_time2wait; /* Default Min time between
36314 * relogins (+aens) */
36315 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36316 index 4169c8b..a8b896b 100644
36317 --- a/drivers/scsi/qla4xxx/ql4_os.c
36318 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36319 @@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
36320 */
36321 if (!iscsi_is_session_online(cls_sess)) {
36322 /* Reset retry relogin timer */
36323 - atomic_inc(&ddb_entry->relogin_retry_count);
36324 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36325 DEBUG2(ql4_printk(KERN_INFO, ha,
36326 "%s: index[%d] relogin timed out-retrying"
36327 " relogin (%d), retry (%d)\n", __func__,
36328 ddb_entry->fw_ddb_index,
36329 - atomic_read(&ddb_entry->relogin_retry_count),
36330 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
36331 ddb_entry->default_time2wait + 4));
36332 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
36333 atomic_set(&ddb_entry->retry_relogin_timer,
36334 @@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
36335
36336 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36337 atomic_set(&ddb_entry->relogin_timer, 0);
36338 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36339 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36340
36341 ddb_entry->default_relogin_timeout =
36342 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
36343 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36344 index 2aeb2e9..46e3925 100644
36345 --- a/drivers/scsi/scsi.c
36346 +++ b/drivers/scsi/scsi.c
36347 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36348 unsigned long timeout;
36349 int rtn = 0;
36350
36351 - atomic_inc(&cmd->device->iorequest_cnt);
36352 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36353
36354 /* check if the device is still usable */
36355 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36356 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36357 index f85cfa6..a57c9e8 100644
36358 --- a/drivers/scsi/scsi_lib.c
36359 +++ b/drivers/scsi/scsi_lib.c
36360 @@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36361 shost = sdev->host;
36362 scsi_init_cmd_errh(cmd);
36363 cmd->result = DID_NO_CONNECT << 16;
36364 - atomic_inc(&cmd->device->iorequest_cnt);
36365 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36366
36367 /*
36368 * SCSI request completion path will do scsi_device_unbusy(),
36369 @@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
36370
36371 INIT_LIST_HEAD(&cmd->eh_entry);
36372
36373 - atomic_inc(&cmd->device->iodone_cnt);
36374 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36375 if (cmd->result)
36376 - atomic_inc(&cmd->device->ioerr_cnt);
36377 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36378
36379 disposition = scsi_decide_disposition(cmd);
36380 if (disposition != SUCCESS &&
36381 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36382 index 04c2a27..9d8bd66 100644
36383 --- a/drivers/scsi/scsi_sysfs.c
36384 +++ b/drivers/scsi/scsi_sysfs.c
36385 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36386 char *buf) \
36387 { \
36388 struct scsi_device *sdev = to_scsi_device(dev); \
36389 - unsigned long long count = atomic_read(&sdev->field); \
36390 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36391 return snprintf(buf, 20, "0x%llx\n", count); \
36392 } \
36393 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36394 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36395 index 84a1fdf..693b0d6 100644
36396 --- a/drivers/scsi/scsi_tgt_lib.c
36397 +++ b/drivers/scsi/scsi_tgt_lib.c
36398 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36399 int err;
36400
36401 dprintk("%lx %u\n", uaddr, len);
36402 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36403 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36404 if (err) {
36405 /*
36406 * TODO: need to fixup sg_tablesize, max_segment_size,
36407 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36408 index 1b21491..1b7f60e 100644
36409 --- a/drivers/scsi/scsi_transport_fc.c
36410 +++ b/drivers/scsi/scsi_transport_fc.c
36411 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36412 * Netlink Infrastructure
36413 */
36414
36415 -static atomic_t fc_event_seq;
36416 +static atomic_unchecked_t fc_event_seq;
36417
36418 /**
36419 * fc_get_event_number - Obtain the next sequential FC event number
36420 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36421 u32
36422 fc_get_event_number(void)
36423 {
36424 - return atomic_add_return(1, &fc_event_seq);
36425 + return atomic_add_return_unchecked(1, &fc_event_seq);
36426 }
36427 EXPORT_SYMBOL(fc_get_event_number);
36428
36429 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36430 {
36431 int error;
36432
36433 - atomic_set(&fc_event_seq, 0);
36434 + atomic_set_unchecked(&fc_event_seq, 0);
36435
36436 error = transport_class_register(&fc_host_class);
36437 if (error)
36438 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36439 char *cp;
36440
36441 *val = simple_strtoul(buf, &cp, 0);
36442 - if ((*cp && (*cp != '\n')) || (*val < 0))
36443 + if (*cp && (*cp != '\n'))
36444 return -EINVAL;
36445 /*
36446 * Check for overflow; dev_loss_tmo is u32
36447 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36448 index 96029e6..4d77fa0 100644
36449 --- a/drivers/scsi/scsi_transport_iscsi.c
36450 +++ b/drivers/scsi/scsi_transport_iscsi.c
36451 @@ -79,7 +79,7 @@ struct iscsi_internal {
36452 struct transport_container session_cont;
36453 };
36454
36455 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36456 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36457 static struct workqueue_struct *iscsi_eh_timer_workq;
36458
36459 static DEFINE_IDA(iscsi_sess_ida);
36460 @@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36461 int err;
36462
36463 ihost = shost->shost_data;
36464 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36465 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36466
36467 if (target_id == ISCSI_MAX_TARGET) {
36468 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
36469 @@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
36470 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36471 ISCSI_TRANSPORT_VERSION);
36472
36473 - atomic_set(&iscsi_session_nr, 0);
36474 + atomic_set_unchecked(&iscsi_session_nr, 0);
36475
36476 err = class_register(&iscsi_transport_class);
36477 if (err)
36478 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36479 index 21a045e..ec89e03 100644
36480 --- a/drivers/scsi/scsi_transport_srp.c
36481 +++ b/drivers/scsi/scsi_transport_srp.c
36482 @@ -33,7 +33,7 @@
36483 #include "scsi_transport_srp_internal.h"
36484
36485 struct srp_host_attrs {
36486 - atomic_t next_port_id;
36487 + atomic_unchecked_t next_port_id;
36488 };
36489 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36490
36491 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36492 struct Scsi_Host *shost = dev_to_shost(dev);
36493 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36494
36495 - atomic_set(&srp_host->next_port_id, 0);
36496 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36497 return 0;
36498 }
36499
36500 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36501 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36502 rport->roles = ids->roles;
36503
36504 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36505 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36506 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36507
36508 transport_setup_device(&rport->dev);
36509 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36510 index 441a1c5..07cece7 100644
36511 --- a/drivers/scsi/sg.c
36512 +++ b/drivers/scsi/sg.c
36513 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36514 sdp->disk->disk_name,
36515 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36516 NULL,
36517 - (char *)arg);
36518 + (char __user *)arg);
36519 case BLKTRACESTART:
36520 return blk_trace_startstop(sdp->device->request_queue, 1);
36521 case BLKTRACESTOP:
36522 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
36523 const struct file_operations * fops;
36524 };
36525
36526 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36527 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36528 {"allow_dio", &adio_fops},
36529 {"debug", &debug_fops},
36530 {"def_reserved_size", &dressz_fops},
36531 @@ -2327,7 +2327,7 @@ sg_proc_init(void)
36532 {
36533 int k, mask;
36534 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36535 - struct sg_proc_leaf * leaf;
36536 + const struct sg_proc_leaf * leaf;
36537
36538 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36539 if (!sg_proc_sgp)
36540 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36541 index f64250e..1ee3049 100644
36542 --- a/drivers/spi/spi-dw-pci.c
36543 +++ b/drivers/spi/spi-dw-pci.c
36544 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
36545 #define spi_resume NULL
36546 #endif
36547
36548 -static const struct pci_device_id pci_ids[] __devinitdata = {
36549 +static const struct pci_device_id pci_ids[] __devinitconst = {
36550 /* Intel MID platform SPI controller 0 */
36551 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36552 {},
36553 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36554 index 77eae99..b7cdcc9 100644
36555 --- a/drivers/spi/spi.c
36556 +++ b/drivers/spi/spi.c
36557 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
36558 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36559
36560 /* portable code must never pass more than 32 bytes */
36561 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36562 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36563
36564 static u8 *buf;
36565
36566 diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
36567 index 436fe97..4082570 100644
36568 --- a/drivers/staging/gma500/power.c
36569 +++ b/drivers/staging/gma500/power.c
36570 @@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
36571 ret = gma_resume_pci(dev->pdev);
36572 if (ret == 0) {
36573 /* FIXME: we want to defer this for Medfield/Oaktrail */
36574 - gma_resume_display(dev);
36575 + gma_resume_display(dev->pdev);
36576 psb_irq_preinstall(dev);
36577 psb_irq_postinstall(dev);
36578 pm_runtime_get(&dev->pdev->dev);
36579 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
36580 index bafccb3..e3ac78d 100644
36581 --- a/drivers/staging/hv/rndis_filter.c
36582 +++ b/drivers/staging/hv/rndis_filter.c
36583 @@ -42,7 +42,7 @@ struct rndis_device {
36584
36585 enum rndis_device_state state;
36586 bool link_state;
36587 - atomic_t new_req_id;
36588 + atomic_unchecked_t new_req_id;
36589
36590 spinlock_t request_lock;
36591 struct list_head req_list;
36592 @@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36593 * template
36594 */
36595 set = &rndis_msg->msg.set_req;
36596 - set->req_id = atomic_inc_return(&dev->new_req_id);
36597 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36598
36599 /* Add to the request list */
36600 spin_lock_irqsave(&dev->request_lock, flags);
36601 @@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36602
36603 /* Setup the rndis set */
36604 halt = &request->request_msg.msg.halt_req;
36605 - halt->req_id = atomic_inc_return(&dev->new_req_id);
36606 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36607
36608 /* Ignore return since this msg is optional. */
36609 rndis_filter_send_request(dev, request);
36610 diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
36611 index 9e8f010..af9efb56 100644
36612 --- a/drivers/staging/iio/buffer_generic.h
36613 +++ b/drivers/staging/iio/buffer_generic.h
36614 @@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
36615
36616 int (*is_enabled)(struct iio_buffer *buffer);
36617 int (*enable)(struct iio_buffer *buffer);
36618 -};
36619 +} __no_const;
36620
36621 /**
36622 * struct iio_buffer_setup_ops - buffer setup related callbacks
36623 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36624 index 8b307b4..a97ac91 100644
36625 --- a/drivers/staging/octeon/ethernet-rx.c
36626 +++ b/drivers/staging/octeon/ethernet-rx.c
36627 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36628 /* Increment RX stats for virtual ports */
36629 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36630 #ifdef CONFIG_64BIT
36631 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36632 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36633 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36634 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36635 #else
36636 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36637 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36638 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36639 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36640 #endif
36641 }
36642 netif_receive_skb(skb);
36643 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36644 dev->name);
36645 */
36646 #ifdef CONFIG_64BIT
36647 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36648 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36649 #else
36650 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36651 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36652 #endif
36653 dev_kfree_skb_irq(skb);
36654 }
36655 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36656 index 076f866..2308070 100644
36657 --- a/drivers/staging/octeon/ethernet.c
36658 +++ b/drivers/staging/octeon/ethernet.c
36659 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36660 * since the RX tasklet also increments it.
36661 */
36662 #ifdef CONFIG_64BIT
36663 - atomic64_add(rx_status.dropped_packets,
36664 - (atomic64_t *)&priv->stats.rx_dropped);
36665 + atomic64_add_unchecked(rx_status.dropped_packets,
36666 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36667 #else
36668 - atomic_add(rx_status.dropped_packets,
36669 - (atomic_t *)&priv->stats.rx_dropped);
36670 + atomic_add_unchecked(rx_status.dropped_packets,
36671 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
36672 #endif
36673 }
36674
36675 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
36676 index 7a19555..466456d 100644
36677 --- a/drivers/staging/pohmelfs/inode.c
36678 +++ b/drivers/staging/pohmelfs/inode.c
36679 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
36680 mutex_init(&psb->mcache_lock);
36681 psb->mcache_root = RB_ROOT;
36682 psb->mcache_timeout = msecs_to_jiffies(5000);
36683 - atomic_long_set(&psb->mcache_gen, 0);
36684 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
36685
36686 psb->trans_max_pages = 100;
36687
36688 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
36689 INIT_LIST_HEAD(&psb->crypto_ready_list);
36690 INIT_LIST_HEAD(&psb->crypto_active_list);
36691
36692 - atomic_set(&psb->trans_gen, 1);
36693 + atomic_set_unchecked(&psb->trans_gen, 1);
36694 atomic_long_set(&psb->total_inodes, 0);
36695
36696 mutex_init(&psb->state_lock);
36697 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
36698 index e22665c..a2a9390 100644
36699 --- a/drivers/staging/pohmelfs/mcache.c
36700 +++ b/drivers/staging/pohmelfs/mcache.c
36701 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
36702 m->data = data;
36703 m->start = start;
36704 m->size = size;
36705 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
36706 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
36707
36708 mutex_lock(&psb->mcache_lock);
36709 err = pohmelfs_mcache_insert(psb, m);
36710 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
36711 index 985b6b7..7699e05 100644
36712 --- a/drivers/staging/pohmelfs/netfs.h
36713 +++ b/drivers/staging/pohmelfs/netfs.h
36714 @@ -571,14 +571,14 @@ struct pohmelfs_config;
36715 struct pohmelfs_sb {
36716 struct rb_root mcache_root;
36717 struct mutex mcache_lock;
36718 - atomic_long_t mcache_gen;
36719 + atomic_long_unchecked_t mcache_gen;
36720 unsigned long mcache_timeout;
36721
36722 unsigned int idx;
36723
36724 unsigned int trans_retries;
36725
36726 - atomic_t trans_gen;
36727 + atomic_unchecked_t trans_gen;
36728
36729 unsigned int crypto_attached_size;
36730 unsigned int crypto_align_size;
36731 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
36732 index 06c1a74..866eebc 100644
36733 --- a/drivers/staging/pohmelfs/trans.c
36734 +++ b/drivers/staging/pohmelfs/trans.c
36735 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
36736 int err;
36737 struct netfs_cmd *cmd = t->iovec.iov_base;
36738
36739 - t->gen = atomic_inc_return(&psb->trans_gen);
36740 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
36741
36742 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
36743 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
36744 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
36745 index 86308a0..feaa925 100644
36746 --- a/drivers/staging/rtl8712/rtl871x_io.h
36747 +++ b/drivers/staging/rtl8712/rtl871x_io.h
36748 @@ -108,7 +108,7 @@ struct _io_ops {
36749 u8 *pmem);
36750 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
36751 u8 *pmem);
36752 -};
36753 +} __no_const;
36754
36755 struct io_req {
36756 struct list_head list;
36757 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
36758 index c7b5e8b..783d6cb 100644
36759 --- a/drivers/staging/sbe-2t3e3/netdev.c
36760 +++ b/drivers/staging/sbe-2t3e3/netdev.c
36761 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
36762 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
36763
36764 if (rlen)
36765 - if (copy_to_user(data, &resp, rlen))
36766 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
36767 return -EFAULT;
36768
36769 return 0;
36770 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
36771 index be21617..0954e45 100644
36772 --- a/drivers/staging/usbip/usbip_common.h
36773 +++ b/drivers/staging/usbip/usbip_common.h
36774 @@ -289,7 +289,7 @@ struct usbip_device {
36775 void (*shutdown)(struct usbip_device *);
36776 void (*reset)(struct usbip_device *);
36777 void (*unusable)(struct usbip_device *);
36778 - } eh_ops;
36779 + } __no_const eh_ops;
36780 };
36781
36782 #if 0
36783 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
36784 index 88b3298..3783eee 100644
36785 --- a/drivers/staging/usbip/vhci.h
36786 +++ b/drivers/staging/usbip/vhci.h
36787 @@ -88,7 +88,7 @@ struct vhci_hcd {
36788 unsigned resuming:1;
36789 unsigned long re_timeout;
36790
36791 - atomic_t seqnum;
36792 + atomic_unchecked_t seqnum;
36793
36794 /*
36795 * NOTE:
36796 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
36797 index 2ee97e2..0420b86 100644
36798 --- a/drivers/staging/usbip/vhci_hcd.c
36799 +++ b/drivers/staging/usbip/vhci_hcd.c
36800 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
36801 return;
36802 }
36803
36804 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
36805 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36806 if (priv->seqnum == 0xffff)
36807 dev_info(&urb->dev->dev, "seqnum max\n");
36808
36809 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
36810 return -ENOMEM;
36811 }
36812
36813 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
36814 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
36815 if (unlink->seqnum == 0xffff)
36816 pr_info("seqnum max\n");
36817
36818 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
36819 vdev->rhport = rhport;
36820 }
36821
36822 - atomic_set(&vhci->seqnum, 0);
36823 + atomic_set_unchecked(&vhci->seqnum, 0);
36824 spin_lock_init(&vhci->lock);
36825
36826 hcd->power_budget = 0; /* no limit */
36827 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
36828 index 3872b8c..fe6d2f4 100644
36829 --- a/drivers/staging/usbip/vhci_rx.c
36830 +++ b/drivers/staging/usbip/vhci_rx.c
36831 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
36832 if (!urb) {
36833 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
36834 pr_info("max seqnum %d\n",
36835 - atomic_read(&the_controller->seqnum));
36836 + atomic_read_unchecked(&the_controller->seqnum));
36837 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
36838 return;
36839 }
36840 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
36841 index 7735027..30eed13 100644
36842 --- a/drivers/staging/vt6655/hostap.c
36843 +++ b/drivers/staging/vt6655/hostap.c
36844 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
36845 *
36846 */
36847
36848 +static net_device_ops_no_const apdev_netdev_ops;
36849 +
36850 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36851 {
36852 PSDevice apdev_priv;
36853 struct net_device *dev = pDevice->dev;
36854 int ret;
36855 - const struct net_device_ops apdev_netdev_ops = {
36856 - .ndo_start_xmit = pDevice->tx_80211,
36857 - };
36858
36859 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36860
36861 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36862 *apdev_priv = *pDevice;
36863 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36864
36865 + /* only half broken now */
36866 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36867 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36868
36869 pDevice->apdev->type = ARPHRD_IEEE80211;
36870 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
36871 index 51b5adf..098e320 100644
36872 --- a/drivers/staging/vt6656/hostap.c
36873 +++ b/drivers/staging/vt6656/hostap.c
36874 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
36875 *
36876 */
36877
36878 +static net_device_ops_no_const apdev_netdev_ops;
36879 +
36880 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36881 {
36882 PSDevice apdev_priv;
36883 struct net_device *dev = pDevice->dev;
36884 int ret;
36885 - const struct net_device_ops apdev_netdev_ops = {
36886 - .ndo_start_xmit = pDevice->tx_80211,
36887 - };
36888
36889 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
36890
36891 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
36892 *apdev_priv = *pDevice;
36893 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
36894
36895 + /* only half broken now */
36896 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
36897 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
36898
36899 pDevice->apdev->type = ARPHRD_IEEE80211;
36900 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
36901 index 7843dfd..3db105f 100644
36902 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
36903 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
36904 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
36905
36906 struct usbctlx_completor {
36907 int (*complete) (struct usbctlx_completor *);
36908 -};
36909 +} __no_const;
36910
36911 static int
36912 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
36913 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
36914 index 1ca66ea..76f1343 100644
36915 --- a/drivers/staging/zcache/tmem.c
36916 +++ b/drivers/staging/zcache/tmem.c
36917 @@ -39,7 +39,7 @@
36918 * A tmem host implementation must use this function to register callbacks
36919 * for memory allocation.
36920 */
36921 -static struct tmem_hostops tmem_hostops;
36922 +static tmem_hostops_no_const tmem_hostops;
36923
36924 static void tmem_objnode_tree_init(void);
36925
36926 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
36927 * A tmem host implementation must use this function to register
36928 * callbacks for a page-accessible memory (PAM) implementation
36929 */
36930 -static struct tmem_pamops tmem_pamops;
36931 +static tmem_pamops_no_const tmem_pamops;
36932
36933 void tmem_register_pamops(struct tmem_pamops *m)
36934 {
36935 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
36936 index ed147c4..94fc3c6 100644
36937 --- a/drivers/staging/zcache/tmem.h
36938 +++ b/drivers/staging/zcache/tmem.h
36939 @@ -180,6 +180,7 @@ struct tmem_pamops {
36940 void (*new_obj)(struct tmem_obj *);
36941 int (*replace_in_obj)(void *, struct tmem_obj *);
36942 };
36943 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
36944 extern void tmem_register_pamops(struct tmem_pamops *m);
36945
36946 /* memory allocation methods provided by the host implementation */
36947 @@ -189,6 +190,7 @@ struct tmem_hostops {
36948 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
36949 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
36950 };
36951 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
36952 extern void tmem_register_hostops(struct tmem_hostops *m);
36953
36954 /* core tmem accessor functions */
36955 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
36956 index 0c1d5c73..88e90a8 100644
36957 --- a/drivers/target/iscsi/iscsi_target.c
36958 +++ b/drivers/target/iscsi/iscsi_target.c
36959 @@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
36960 * outstanding_r2ts reaches zero, go ahead and send the delayed
36961 * TASK_ABORTED status.
36962 */
36963 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
36964 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
36965 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
36966 if (--cmd->outstanding_r2ts < 1) {
36967 iscsit_stop_dataout_timer(cmd);
36968 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
36969 index 6845228..df77141 100644
36970 --- a/drivers/target/target_core_tmr.c
36971 +++ b/drivers/target/target_core_tmr.c
36972 @@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
36973 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
36974 cmd->t_task_list_num,
36975 atomic_read(&cmd->t_task_cdbs_left),
36976 - atomic_read(&cmd->t_task_cdbs_sent),
36977 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
36978 atomic_read(&cmd->t_transport_active),
36979 atomic_read(&cmd->t_transport_stop),
36980 atomic_read(&cmd->t_transport_sent));
36981 @@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
36982 pr_debug("LUN_RESET: got t_transport_active = 1 for"
36983 " task: %p, t_fe_count: %d dev: %p\n", task,
36984 fe_count, dev);
36985 - atomic_set(&cmd->t_transport_aborted, 1);
36986 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36987 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36988
36989 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36990 @@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
36991 }
36992 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
36993 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
36994 - atomic_set(&cmd->t_transport_aborted, 1);
36995 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
36996 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
36997
36998 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
36999 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37000 index e4ddb93..2fc6e0f 100644
37001 --- a/drivers/target/target_core_transport.c
37002 +++ b/drivers/target/target_core_transport.c
37003 @@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
37004
37005 dev->queue_depth = dev_limits->queue_depth;
37006 atomic_set(&dev->depth_left, dev->queue_depth);
37007 - atomic_set(&dev->dev_ordered_id, 0);
37008 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37009
37010 se_dev_set_default_attribs(dev, dev_limits);
37011
37012 @@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37013 * Used to determine when ORDERED commands should go from
37014 * Dormant to Active status.
37015 */
37016 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37017 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37018 smp_mb__after_atomic_inc();
37019 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37020 cmd->se_ordered_id, cmd->sam_task_attr,
37021 @@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
37022 " t_transport_active: %d t_transport_stop: %d"
37023 " t_transport_sent: %d\n", cmd->t_task_list_num,
37024 atomic_read(&cmd->t_task_cdbs_left),
37025 - atomic_read(&cmd->t_task_cdbs_sent),
37026 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37027 atomic_read(&cmd->t_task_cdbs_ex_left),
37028 atomic_read(&cmd->t_transport_active),
37029 atomic_read(&cmd->t_transport_stop),
37030 @@ -2089,9 +2089,9 @@ check_depth:
37031
37032 spin_lock_irqsave(&cmd->t_state_lock, flags);
37033 task->task_flags |= (TF_ACTIVE | TF_SENT);
37034 - atomic_inc(&cmd->t_task_cdbs_sent);
37035 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37036
37037 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37038 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37039 cmd->t_task_list_num)
37040 atomic_set(&cmd->t_transport_sent, 1);
37041
37042 @@ -4296,7 +4296,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
37043 atomic_set(&cmd->transport_lun_stop, 0);
37044 }
37045 if (!atomic_read(&cmd->t_transport_active) ||
37046 - atomic_read(&cmd->t_transport_aborted)) {
37047 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
37048 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37049 return false;
37050 }
37051 @@ -4545,7 +4545,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37052 {
37053 int ret = 0;
37054
37055 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
37056 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37057 if (!send_status ||
37058 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37059 return 1;
37060 @@ -4582,7 +4582,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37061 */
37062 if (cmd->data_direction == DMA_TO_DEVICE) {
37063 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37064 - atomic_inc(&cmd->t_transport_aborted);
37065 + atomic_inc_unchecked(&cmd->t_transport_aborted);
37066 smp_mb__after_atomic_inc();
37067 }
37068 }
37069 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37070 index b9040be..e3f5aab 100644
37071 --- a/drivers/tty/hvc/hvcs.c
37072 +++ b/drivers/tty/hvc/hvcs.c
37073 @@ -83,6 +83,7 @@
37074 #include <asm/hvcserver.h>
37075 #include <asm/uaccess.h>
37076 #include <asm/vio.h>
37077 +#include <asm/local.h>
37078
37079 /*
37080 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37081 @@ -270,7 +271,7 @@ struct hvcs_struct {
37082 unsigned int index;
37083
37084 struct tty_struct *tty;
37085 - int open_count;
37086 + local_t open_count;
37087
37088 /*
37089 * Used to tell the driver kernel_thread what operations need to take
37090 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37091
37092 spin_lock_irqsave(&hvcsd->lock, flags);
37093
37094 - if (hvcsd->open_count > 0) {
37095 + if (local_read(&hvcsd->open_count) > 0) {
37096 spin_unlock_irqrestore(&hvcsd->lock, flags);
37097 printk(KERN_INFO "HVCS: vterm state unchanged. "
37098 "The hvcs device node is still in use.\n");
37099 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37100 if ((retval = hvcs_partner_connect(hvcsd)))
37101 goto error_release;
37102
37103 - hvcsd->open_count = 1;
37104 + local_set(&hvcsd->open_count, 1);
37105 hvcsd->tty = tty;
37106 tty->driver_data = hvcsd;
37107
37108 @@ -1179,7 +1180,7 @@ fast_open:
37109
37110 spin_lock_irqsave(&hvcsd->lock, flags);
37111 kref_get(&hvcsd->kref);
37112 - hvcsd->open_count++;
37113 + local_inc(&hvcsd->open_count);
37114 hvcsd->todo_mask |= HVCS_SCHED_READ;
37115 spin_unlock_irqrestore(&hvcsd->lock, flags);
37116
37117 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37118 hvcsd = tty->driver_data;
37119
37120 spin_lock_irqsave(&hvcsd->lock, flags);
37121 - if (--hvcsd->open_count == 0) {
37122 + if (local_dec_and_test(&hvcsd->open_count)) {
37123
37124 vio_disable_interrupts(hvcsd->vdev);
37125
37126 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37127 free_irq(irq, hvcsd);
37128 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37129 return;
37130 - } else if (hvcsd->open_count < 0) {
37131 + } else if (local_read(&hvcsd->open_count) < 0) {
37132 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37133 " is missmanaged.\n",
37134 - hvcsd->vdev->unit_address, hvcsd->open_count);
37135 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37136 }
37137
37138 spin_unlock_irqrestore(&hvcsd->lock, flags);
37139 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37140
37141 spin_lock_irqsave(&hvcsd->lock, flags);
37142 /* Preserve this so that we know how many kref refs to put */
37143 - temp_open_count = hvcsd->open_count;
37144 + temp_open_count = local_read(&hvcsd->open_count);
37145
37146 /*
37147 * Don't kref put inside the spinlock because the destruction
37148 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37149 hvcsd->tty->driver_data = NULL;
37150 hvcsd->tty = NULL;
37151
37152 - hvcsd->open_count = 0;
37153 + local_set(&hvcsd->open_count, 0);
37154
37155 /* This will drop any buffered data on the floor which is OK in a hangup
37156 * scenario. */
37157 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37158 * the middle of a write operation? This is a crummy place to do this
37159 * but we want to keep it all in the spinlock.
37160 */
37161 - if (hvcsd->open_count <= 0) {
37162 + if (local_read(&hvcsd->open_count) <= 0) {
37163 spin_unlock_irqrestore(&hvcsd->lock, flags);
37164 return -ENODEV;
37165 }
37166 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37167 {
37168 struct hvcs_struct *hvcsd = tty->driver_data;
37169
37170 - if (!hvcsd || hvcsd->open_count <= 0)
37171 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37172 return 0;
37173
37174 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37175 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37176 index ef92869..f4ebd88 100644
37177 --- a/drivers/tty/ipwireless/tty.c
37178 +++ b/drivers/tty/ipwireless/tty.c
37179 @@ -29,6 +29,7 @@
37180 #include <linux/tty_driver.h>
37181 #include <linux/tty_flip.h>
37182 #include <linux/uaccess.h>
37183 +#include <asm/local.h>
37184
37185 #include "tty.h"
37186 #include "network.h"
37187 @@ -51,7 +52,7 @@ struct ipw_tty {
37188 int tty_type;
37189 struct ipw_network *network;
37190 struct tty_struct *linux_tty;
37191 - int open_count;
37192 + local_t open_count;
37193 unsigned int control_lines;
37194 struct mutex ipw_tty_mutex;
37195 int tx_bytes_queued;
37196 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37197 mutex_unlock(&tty->ipw_tty_mutex);
37198 return -ENODEV;
37199 }
37200 - if (tty->open_count == 0)
37201 + if (local_read(&tty->open_count) == 0)
37202 tty->tx_bytes_queued = 0;
37203
37204 - tty->open_count++;
37205 + local_inc(&tty->open_count);
37206
37207 tty->linux_tty = linux_tty;
37208 linux_tty->driver_data = tty;
37209 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37210
37211 static void do_ipw_close(struct ipw_tty *tty)
37212 {
37213 - tty->open_count--;
37214 -
37215 - if (tty->open_count == 0) {
37216 + if (local_dec_return(&tty->open_count) == 0) {
37217 struct tty_struct *linux_tty = tty->linux_tty;
37218
37219 if (linux_tty != NULL) {
37220 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37221 return;
37222
37223 mutex_lock(&tty->ipw_tty_mutex);
37224 - if (tty->open_count == 0) {
37225 + if (local_read(&tty->open_count) == 0) {
37226 mutex_unlock(&tty->ipw_tty_mutex);
37227 return;
37228 }
37229 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37230 return;
37231 }
37232
37233 - if (!tty->open_count) {
37234 + if (!local_read(&tty->open_count)) {
37235 mutex_unlock(&tty->ipw_tty_mutex);
37236 return;
37237 }
37238 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37239 return -ENODEV;
37240
37241 mutex_lock(&tty->ipw_tty_mutex);
37242 - if (!tty->open_count) {
37243 + if (!local_read(&tty->open_count)) {
37244 mutex_unlock(&tty->ipw_tty_mutex);
37245 return -EINVAL;
37246 }
37247 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37248 if (!tty)
37249 return -ENODEV;
37250
37251 - if (!tty->open_count)
37252 + if (!local_read(&tty->open_count))
37253 return -EINVAL;
37254
37255 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37256 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37257 if (!tty)
37258 return 0;
37259
37260 - if (!tty->open_count)
37261 + if (!local_read(&tty->open_count))
37262 return 0;
37263
37264 return tty->tx_bytes_queued;
37265 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37266 if (!tty)
37267 return -ENODEV;
37268
37269 - if (!tty->open_count)
37270 + if (!local_read(&tty->open_count))
37271 return -EINVAL;
37272
37273 return get_control_lines(tty);
37274 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37275 if (!tty)
37276 return -ENODEV;
37277
37278 - if (!tty->open_count)
37279 + if (!local_read(&tty->open_count))
37280 return -EINVAL;
37281
37282 return set_control_lines(tty, set, clear);
37283 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37284 if (!tty)
37285 return -ENODEV;
37286
37287 - if (!tty->open_count)
37288 + if (!local_read(&tty->open_count))
37289 return -EINVAL;
37290
37291 /* FIXME: Exactly how is the tty object locked here .. */
37292 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37293 against a parallel ioctl etc */
37294 mutex_lock(&ttyj->ipw_tty_mutex);
37295 }
37296 - while (ttyj->open_count)
37297 + while (local_read(&ttyj->open_count))
37298 do_ipw_close(ttyj);
37299 ipwireless_disassociate_network_ttys(network,
37300 ttyj->channel_idx);
37301 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37302 index fc7bbba..9527e93 100644
37303 --- a/drivers/tty/n_gsm.c
37304 +++ b/drivers/tty/n_gsm.c
37305 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37306 kref_init(&dlci->ref);
37307 mutex_init(&dlci->mutex);
37308 dlci->fifo = &dlci->_fifo;
37309 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37310 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37311 kfree(dlci);
37312 return NULL;
37313 }
37314 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37315 index 39d6ab6..eb97f41 100644
37316 --- a/drivers/tty/n_tty.c
37317 +++ b/drivers/tty/n_tty.c
37318 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37319 {
37320 *ops = tty_ldisc_N_TTY;
37321 ops->owner = NULL;
37322 - ops->refcount = ops->flags = 0;
37323 + atomic_set(&ops->refcount, 0);
37324 + ops->flags = 0;
37325 }
37326 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37327 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37328 index e18604b..a7d5a11 100644
37329 --- a/drivers/tty/pty.c
37330 +++ b/drivers/tty/pty.c
37331 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
37332 register_sysctl_table(pty_root_table);
37333
37334 /* Now create the /dev/ptmx special device */
37335 + pax_open_kernel();
37336 tty_default_fops(&ptmx_fops);
37337 - ptmx_fops.open = ptmx_open;
37338 + *(void **)&ptmx_fops.open = ptmx_open;
37339 + pax_close_kernel();
37340
37341 cdev_init(&ptmx_cdev, &ptmx_fops);
37342 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37343 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37344 index 2b42a01..32a2ed3 100644
37345 --- a/drivers/tty/serial/kgdboc.c
37346 +++ b/drivers/tty/serial/kgdboc.c
37347 @@ -24,8 +24,9 @@
37348 #define MAX_CONFIG_LEN 40
37349
37350 static struct kgdb_io kgdboc_io_ops;
37351 +static struct kgdb_io kgdboc_io_ops_console;
37352
37353 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37354 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37355 static int configured = -1;
37356
37357 static char config[MAX_CONFIG_LEN];
37358 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37359 kgdboc_unregister_kbd();
37360 if (configured == 1)
37361 kgdb_unregister_io_module(&kgdboc_io_ops);
37362 + else if (configured == 2)
37363 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37364 }
37365
37366 static int configure_kgdboc(void)
37367 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37368 int err;
37369 char *cptr = config;
37370 struct console *cons;
37371 + int is_console = 0;
37372
37373 err = kgdboc_option_setup(config);
37374 if (err || !strlen(config) || isspace(config[0]))
37375 goto noconfig;
37376
37377 err = -ENODEV;
37378 - kgdboc_io_ops.is_console = 0;
37379 kgdb_tty_driver = NULL;
37380
37381 kgdboc_use_kms = 0;
37382 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37383 int idx;
37384 if (cons->device && cons->device(cons, &idx) == p &&
37385 idx == tty_line) {
37386 - kgdboc_io_ops.is_console = 1;
37387 + is_console = 1;
37388 break;
37389 }
37390 cons = cons->next;
37391 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
37392 kgdb_tty_line = tty_line;
37393
37394 do_register:
37395 - err = kgdb_register_io_module(&kgdboc_io_ops);
37396 + if (is_console) {
37397 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37398 + configured = 2;
37399 + } else {
37400 + err = kgdb_register_io_module(&kgdboc_io_ops);
37401 + configured = 1;
37402 + }
37403 if (err)
37404 goto noconfig;
37405
37406 - configured = 1;
37407 -
37408 return 0;
37409
37410 noconfig:
37411 @@ -213,7 +220,7 @@ noconfig:
37412 static int __init init_kgdboc(void)
37413 {
37414 /* Already configured? */
37415 - if (configured == 1)
37416 + if (configured >= 1)
37417 return 0;
37418
37419 return configure_kgdboc();
37420 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37421 if (config[len - 1] == '\n')
37422 config[len - 1] = '\0';
37423
37424 - if (configured == 1)
37425 + if (configured >= 1)
37426 cleanup_kgdboc();
37427
37428 /* Go and configure with the new params. */
37429 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
37430 .post_exception = kgdboc_post_exp_handler,
37431 };
37432
37433 +static struct kgdb_io kgdboc_io_ops_console = {
37434 + .name = "kgdboc",
37435 + .read_char = kgdboc_get_char,
37436 + .write_char = kgdboc_put_char,
37437 + .pre_exception = kgdboc_pre_exp_handler,
37438 + .post_exception = kgdboc_post_exp_handler,
37439 + .is_console = 1
37440 +};
37441 +
37442 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37443 /* This is only available if kgdboc is a built in for early debugging */
37444 static int __init kgdboc_early_init(char *opt)
37445 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37446 index 05085be..67eadb0 100644
37447 --- a/drivers/tty/tty_io.c
37448 +++ b/drivers/tty/tty_io.c
37449 @@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37450
37451 void tty_default_fops(struct file_operations *fops)
37452 {
37453 - *fops = tty_fops;
37454 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37455 }
37456
37457 /*
37458 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37459 index 8e0924f..4204eb4 100644
37460 --- a/drivers/tty/tty_ldisc.c
37461 +++ b/drivers/tty/tty_ldisc.c
37462 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37463 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37464 struct tty_ldisc_ops *ldo = ld->ops;
37465
37466 - ldo->refcount--;
37467 + atomic_dec(&ldo->refcount);
37468 module_put(ldo->owner);
37469 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37470
37471 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37472 spin_lock_irqsave(&tty_ldisc_lock, flags);
37473 tty_ldiscs[disc] = new_ldisc;
37474 new_ldisc->num = disc;
37475 - new_ldisc->refcount = 0;
37476 + atomic_set(&new_ldisc->refcount, 0);
37477 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37478
37479 return ret;
37480 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
37481 return -EINVAL;
37482
37483 spin_lock_irqsave(&tty_ldisc_lock, flags);
37484 - if (tty_ldiscs[disc]->refcount)
37485 + if (atomic_read(&tty_ldiscs[disc]->refcount))
37486 ret = -EBUSY;
37487 else
37488 tty_ldiscs[disc] = NULL;
37489 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37490 if (ldops) {
37491 ret = ERR_PTR(-EAGAIN);
37492 if (try_module_get(ldops->owner)) {
37493 - ldops->refcount++;
37494 + atomic_inc(&ldops->refcount);
37495 ret = ldops;
37496 }
37497 }
37498 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37499 unsigned long flags;
37500
37501 spin_lock_irqsave(&tty_ldisc_lock, flags);
37502 - ldops->refcount--;
37503 + atomic_dec(&ldops->refcount);
37504 module_put(ldops->owner);
37505 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37506 }
37507 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37508 index a605549..6bd3c96 100644
37509 --- a/drivers/tty/vt/keyboard.c
37510 +++ b/drivers/tty/vt/keyboard.c
37511 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37512 kbd->kbdmode == VC_OFF) &&
37513 value != KVAL(K_SAK))
37514 return; /* SAK is allowed even in raw mode */
37515 +
37516 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37517 + {
37518 + void *func = fn_handler[value];
37519 + if (func == fn_show_state || func == fn_show_ptregs ||
37520 + func == fn_show_mem)
37521 + return;
37522 + }
37523 +#endif
37524 +
37525 fn_handler[value](vc);
37526 }
37527
37528 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
37529 index 65447c5..0526f0a 100644
37530 --- a/drivers/tty/vt/vt_ioctl.c
37531 +++ b/drivers/tty/vt/vt_ioctl.c
37532 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37533 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37534 return -EFAULT;
37535
37536 - if (!capable(CAP_SYS_TTY_CONFIG))
37537 - perm = 0;
37538 -
37539 switch (cmd) {
37540 case KDGKBENT:
37541 key_map = key_maps[s];
37542 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37543 val = (i ? K_HOLE : K_NOSUCHMAP);
37544 return put_user(val, &user_kbe->kb_value);
37545 case KDSKBENT:
37546 + if (!capable(CAP_SYS_TTY_CONFIG))
37547 + perm = 0;
37548 +
37549 if (!perm)
37550 return -EPERM;
37551 if (!i && v == K_NOSUCHMAP) {
37552 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37553 int i, j, k;
37554 int ret;
37555
37556 - if (!capable(CAP_SYS_TTY_CONFIG))
37557 - perm = 0;
37558 -
37559 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37560 if (!kbs) {
37561 ret = -ENOMEM;
37562 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37563 kfree(kbs);
37564 return ((p && *p) ? -EOVERFLOW : 0);
37565 case KDSKBSENT:
37566 + if (!capable(CAP_SYS_TTY_CONFIG))
37567 + perm = 0;
37568 +
37569 if (!perm) {
37570 ret = -EPERM;
37571 goto reterr;
37572 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37573 index a783d53..cb30d94 100644
37574 --- a/drivers/uio/uio.c
37575 +++ b/drivers/uio/uio.c
37576 @@ -25,6 +25,7 @@
37577 #include <linux/kobject.h>
37578 #include <linux/cdev.h>
37579 #include <linux/uio_driver.h>
37580 +#include <asm/local.h>
37581
37582 #define UIO_MAX_DEVICES (1U << MINORBITS)
37583
37584 @@ -32,10 +33,10 @@ struct uio_device {
37585 struct module *owner;
37586 struct device *dev;
37587 int minor;
37588 - atomic_t event;
37589 + atomic_unchecked_t event;
37590 struct fasync_struct *async_queue;
37591 wait_queue_head_t wait;
37592 - int vma_count;
37593 + local_t vma_count;
37594 struct uio_info *info;
37595 struct kobject *map_dir;
37596 struct kobject *portio_dir;
37597 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
37598 struct device_attribute *attr, char *buf)
37599 {
37600 struct uio_device *idev = dev_get_drvdata(dev);
37601 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
37602 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
37603 }
37604
37605 static struct device_attribute uio_class_attributes[] = {
37606 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
37607 {
37608 struct uio_device *idev = info->uio_dev;
37609
37610 - atomic_inc(&idev->event);
37611 + atomic_inc_unchecked(&idev->event);
37612 wake_up_interruptible(&idev->wait);
37613 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
37614 }
37615 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
37616 }
37617
37618 listener->dev = idev;
37619 - listener->event_count = atomic_read(&idev->event);
37620 + listener->event_count = atomic_read_unchecked(&idev->event);
37621 filep->private_data = listener;
37622
37623 if (idev->info->open) {
37624 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
37625 return -EIO;
37626
37627 poll_wait(filep, &idev->wait, wait);
37628 - if (listener->event_count != atomic_read(&idev->event))
37629 + if (listener->event_count != atomic_read_unchecked(&idev->event))
37630 return POLLIN | POLLRDNORM;
37631 return 0;
37632 }
37633 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
37634 do {
37635 set_current_state(TASK_INTERRUPTIBLE);
37636
37637 - event_count = atomic_read(&idev->event);
37638 + event_count = atomic_read_unchecked(&idev->event);
37639 if (event_count != listener->event_count) {
37640 if (copy_to_user(buf, &event_count, count))
37641 retval = -EFAULT;
37642 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
37643 static void uio_vma_open(struct vm_area_struct *vma)
37644 {
37645 struct uio_device *idev = vma->vm_private_data;
37646 - idev->vma_count++;
37647 + local_inc(&idev->vma_count);
37648 }
37649
37650 static void uio_vma_close(struct vm_area_struct *vma)
37651 {
37652 struct uio_device *idev = vma->vm_private_data;
37653 - idev->vma_count--;
37654 + local_dec(&idev->vma_count);
37655 }
37656
37657 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
37658 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
37659 idev->owner = owner;
37660 idev->info = info;
37661 init_waitqueue_head(&idev->wait);
37662 - atomic_set(&idev->event, 0);
37663 + atomic_set_unchecked(&idev->event, 0);
37664
37665 ret = uio_get_minor(idev);
37666 if (ret)
37667 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
37668 index a845f8b..4f54072 100644
37669 --- a/drivers/usb/atm/cxacru.c
37670 +++ b/drivers/usb/atm/cxacru.c
37671 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
37672 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
37673 if (ret < 2)
37674 return -EINVAL;
37675 - if (index < 0 || index > 0x7f)
37676 + if (index > 0x7f)
37677 return -EINVAL;
37678 pos += tmp;
37679
37680 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
37681 index d3448ca..d2864ca 100644
37682 --- a/drivers/usb/atm/usbatm.c
37683 +++ b/drivers/usb/atm/usbatm.c
37684 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37685 if (printk_ratelimit())
37686 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
37687 __func__, vpi, vci);
37688 - atomic_inc(&vcc->stats->rx_err);
37689 + atomic_inc_unchecked(&vcc->stats->rx_err);
37690 return;
37691 }
37692
37693 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37694 if (length > ATM_MAX_AAL5_PDU) {
37695 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
37696 __func__, length, vcc);
37697 - atomic_inc(&vcc->stats->rx_err);
37698 + atomic_inc_unchecked(&vcc->stats->rx_err);
37699 goto out;
37700 }
37701
37702 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37703 if (sarb->len < pdu_length) {
37704 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
37705 __func__, pdu_length, sarb->len, vcc);
37706 - atomic_inc(&vcc->stats->rx_err);
37707 + atomic_inc_unchecked(&vcc->stats->rx_err);
37708 goto out;
37709 }
37710
37711 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
37712 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
37713 __func__, vcc);
37714 - atomic_inc(&vcc->stats->rx_err);
37715 + atomic_inc_unchecked(&vcc->stats->rx_err);
37716 goto out;
37717 }
37718
37719 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37720 if (printk_ratelimit())
37721 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
37722 __func__, length);
37723 - atomic_inc(&vcc->stats->rx_drop);
37724 + atomic_inc_unchecked(&vcc->stats->rx_drop);
37725 goto out;
37726 }
37727
37728 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
37729
37730 vcc->push(vcc, skb);
37731
37732 - atomic_inc(&vcc->stats->rx);
37733 + atomic_inc_unchecked(&vcc->stats->rx);
37734 out:
37735 skb_trim(sarb, 0);
37736 }
37737 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
37738 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
37739
37740 usbatm_pop(vcc, skb);
37741 - atomic_inc(&vcc->stats->tx);
37742 + atomic_inc_unchecked(&vcc->stats->tx);
37743
37744 skb = skb_dequeue(&instance->sndqueue);
37745 }
37746 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
37747 if (!left--)
37748 return sprintf(page,
37749 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
37750 - atomic_read(&atm_dev->stats.aal5.tx),
37751 - atomic_read(&atm_dev->stats.aal5.tx_err),
37752 - atomic_read(&atm_dev->stats.aal5.rx),
37753 - atomic_read(&atm_dev->stats.aal5.rx_err),
37754 - atomic_read(&atm_dev->stats.aal5.rx_drop));
37755 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
37756 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
37757 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
37758 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
37759 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
37760
37761 if (!left--) {
37762 if (instance->disconnected)
37763 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
37764 index d956965..4179a77 100644
37765 --- a/drivers/usb/core/devices.c
37766 +++ b/drivers/usb/core/devices.c
37767 @@ -126,7 +126,7 @@ static const char format_endpt[] =
37768 * time it gets called.
37769 */
37770 static struct device_connect_event {
37771 - atomic_t count;
37772 + atomic_unchecked_t count;
37773 wait_queue_head_t wait;
37774 } device_event = {
37775 .count = ATOMIC_INIT(1),
37776 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
37777
37778 void usbfs_conn_disc_event(void)
37779 {
37780 - atomic_add(2, &device_event.count);
37781 + atomic_add_unchecked(2, &device_event.count);
37782 wake_up(&device_event.wait);
37783 }
37784
37785 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
37786
37787 poll_wait(file, &device_event.wait, wait);
37788
37789 - event_count = atomic_read(&device_event.count);
37790 + event_count = atomic_read_unchecked(&device_event.count);
37791 if (file->f_version != event_count) {
37792 file->f_version = event_count;
37793 return POLLIN | POLLRDNORM;
37794 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
37795 index b3bdfed..a9460e0 100644
37796 --- a/drivers/usb/core/message.c
37797 +++ b/drivers/usb/core/message.c
37798 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
37799 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
37800 if (buf) {
37801 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
37802 - if (len > 0) {
37803 - smallbuf = kmalloc(++len, GFP_NOIO);
37804 + if (len++ > 0) {
37805 + smallbuf = kmalloc(len, GFP_NOIO);
37806 if (!smallbuf)
37807 return buf;
37808 memcpy(smallbuf, buf, len);
37809 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
37810 index 1fc8f12..20647c1 100644
37811 --- a/drivers/usb/early/ehci-dbgp.c
37812 +++ b/drivers/usb/early/ehci-dbgp.c
37813 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
37814
37815 #ifdef CONFIG_KGDB
37816 static struct kgdb_io kgdbdbgp_io_ops;
37817 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
37818 +static struct kgdb_io kgdbdbgp_io_ops_console;
37819 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
37820 #else
37821 #define dbgp_kgdb_mode (0)
37822 #endif
37823 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
37824 .write_char = kgdbdbgp_write_char,
37825 };
37826
37827 +static struct kgdb_io kgdbdbgp_io_ops_console = {
37828 + .name = "kgdbdbgp",
37829 + .read_char = kgdbdbgp_read_char,
37830 + .write_char = kgdbdbgp_write_char,
37831 + .is_console = 1
37832 +};
37833 +
37834 static int kgdbdbgp_wait_time;
37835
37836 static int __init kgdbdbgp_parse_config(char *str)
37837 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
37838 ptr++;
37839 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
37840 }
37841 - kgdb_register_io_module(&kgdbdbgp_io_ops);
37842 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
37843 + if (early_dbgp_console.index != -1)
37844 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
37845 + else
37846 + kgdb_register_io_module(&kgdbdbgp_io_ops);
37847
37848 return 0;
37849 }
37850 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
37851 index d6bea3e..60b250e 100644
37852 --- a/drivers/usb/wusbcore/wa-hc.h
37853 +++ b/drivers/usb/wusbcore/wa-hc.h
37854 @@ -192,7 +192,7 @@ struct wahc {
37855 struct list_head xfer_delayed_list;
37856 spinlock_t xfer_list_lock;
37857 struct work_struct xfer_work;
37858 - atomic_t xfer_id_count;
37859 + atomic_unchecked_t xfer_id_count;
37860 };
37861
37862
37863 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
37864 INIT_LIST_HEAD(&wa->xfer_delayed_list);
37865 spin_lock_init(&wa->xfer_list_lock);
37866 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
37867 - atomic_set(&wa->xfer_id_count, 1);
37868 + atomic_set_unchecked(&wa->xfer_id_count, 1);
37869 }
37870
37871 /**
37872 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
37873 index 57c01ab..8a05959 100644
37874 --- a/drivers/usb/wusbcore/wa-xfer.c
37875 +++ b/drivers/usb/wusbcore/wa-xfer.c
37876 @@ -296,7 +296,7 @@ out:
37877 */
37878 static void wa_xfer_id_init(struct wa_xfer *xfer)
37879 {
37880 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
37881 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
37882 }
37883
37884 /*
37885 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
37886 index c14c42b..f955cc2 100644
37887 --- a/drivers/vhost/vhost.c
37888 +++ b/drivers/vhost/vhost.c
37889 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
37890 return 0;
37891 }
37892
37893 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
37894 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
37895 {
37896 struct file *eventfp, *filep = NULL,
37897 *pollstart = NULL, *pollstop = NULL;
37898 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
37899 index b0b2ac3..89a4399 100644
37900 --- a/drivers/video/aty/aty128fb.c
37901 +++ b/drivers/video/aty/aty128fb.c
37902 @@ -148,7 +148,7 @@ enum {
37903 };
37904
37905 /* Must match above enum */
37906 -static const char *r128_family[] __devinitdata = {
37907 +static const char *r128_family[] __devinitconst = {
37908 "AGP",
37909 "PCI",
37910 "PRO AGP",
37911 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
37912 index 5c3960d..15cf8fc 100644
37913 --- a/drivers/video/fbcmap.c
37914 +++ b/drivers/video/fbcmap.c
37915 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
37916 rc = -ENODEV;
37917 goto out;
37918 }
37919 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
37920 - !info->fbops->fb_setcmap)) {
37921 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
37922 rc = -EINVAL;
37923 goto out1;
37924 }
37925 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
37926 index ad93629..e020fc3 100644
37927 --- a/drivers/video/fbmem.c
37928 +++ b/drivers/video/fbmem.c
37929 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37930 image->dx += image->width + 8;
37931 }
37932 } else if (rotate == FB_ROTATE_UD) {
37933 - for (x = 0; x < num && image->dx >= 0; x++) {
37934 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
37935 info->fbops->fb_imageblit(info, image);
37936 image->dx -= image->width + 8;
37937 }
37938 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
37939 image->dy += image->height + 8;
37940 }
37941 } else if (rotate == FB_ROTATE_CCW) {
37942 - for (x = 0; x < num && image->dy >= 0; x++) {
37943 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
37944 info->fbops->fb_imageblit(info, image);
37945 image->dy -= image->height + 8;
37946 }
37947 @@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
37948 return -EFAULT;
37949 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
37950 return -EINVAL;
37951 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
37952 + if (con2fb.framebuffer >= FB_MAX)
37953 return -EINVAL;
37954 if (!registered_fb[con2fb.framebuffer])
37955 request_module("fb%d", con2fb.framebuffer);
37956 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
37957 index 5a5d092..265c5ed 100644
37958 --- a/drivers/video/geode/gx1fb_core.c
37959 +++ b/drivers/video/geode/gx1fb_core.c
37960 @@ -29,7 +29,7 @@ static int crt_option = 1;
37961 static char panel_option[32] = "";
37962
37963 /* Modes relevant to the GX1 (taken from modedb.c) */
37964 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
37965 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
37966 /* 640x480-60 VESA */
37967 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
37968 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
37969 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
37970 index 0fad23f..0e9afa4 100644
37971 --- a/drivers/video/gxt4500.c
37972 +++ b/drivers/video/gxt4500.c
37973 @@ -156,7 +156,7 @@ struct gxt4500_par {
37974 static char *mode_option;
37975
37976 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
37977 -static const struct fb_videomode defaultmode __devinitdata = {
37978 +static const struct fb_videomode defaultmode __devinitconst = {
37979 .refresh = 60,
37980 .xres = 1280,
37981 .yres = 1024,
37982 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
37983 return 0;
37984 }
37985
37986 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
37987 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
37988 .id = "IBM GXT4500P",
37989 .type = FB_TYPE_PACKED_PIXELS,
37990 .visual = FB_VISUAL_PSEUDOCOLOR,
37991 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
37992 index 7672d2e..b56437f 100644
37993 --- a/drivers/video/i810/i810_accel.c
37994 +++ b/drivers/video/i810/i810_accel.c
37995 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
37996 }
37997 }
37998 printk("ringbuffer lockup!!!\n");
37999 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38000 i810_report_error(mmio);
38001 par->dev_flags |= LOCKUP;
38002 info->pixmap.scan_align = 1;
38003 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38004 index 318f6fb..9a389c1 100644
38005 --- a/drivers/video/i810/i810_main.c
38006 +++ b/drivers/video/i810/i810_main.c
38007 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38008 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38009
38010 /* PCI */
38011 -static const char *i810_pci_list[] __devinitdata = {
38012 +static const char *i810_pci_list[] __devinitconst = {
38013 "Intel(R) 810 Framebuffer Device" ,
38014 "Intel(R) 810-DC100 Framebuffer Device" ,
38015 "Intel(R) 810E Framebuffer Device" ,
38016 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38017 index de36693..3c63fc2 100644
38018 --- a/drivers/video/jz4740_fb.c
38019 +++ b/drivers/video/jz4740_fb.c
38020 @@ -136,7 +136,7 @@ struct jzfb {
38021 uint32_t pseudo_palette[16];
38022 };
38023
38024 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38025 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38026 .id = "JZ4740 FB",
38027 .type = FB_TYPE_PACKED_PIXELS,
38028 .visual = FB_VISUAL_TRUECOLOR,
38029 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38030 index 3c14e43..eafa544 100644
38031 --- a/drivers/video/logo/logo_linux_clut224.ppm
38032 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38033 @@ -1,1604 +1,1123 @@
38034 P3
38035 -# Standard 224-color Linux logo
38036 80 80
38037 255
38038 - 0 0 0 0 0 0 0 0 0 0 0 0
38039 - 0 0 0 0 0 0 0 0 0 0 0 0
38040 - 0 0 0 0 0 0 0 0 0 0 0 0
38041 - 0 0 0 0 0 0 0 0 0 0 0 0
38042 - 0 0 0 0 0 0 0 0 0 0 0 0
38043 - 0 0 0 0 0 0 0 0 0 0 0 0
38044 - 0 0 0 0 0 0 0 0 0 0 0 0
38045 - 0 0 0 0 0 0 0 0 0 0 0 0
38046 - 0 0 0 0 0 0 0 0 0 0 0 0
38047 - 6 6 6 6 6 6 10 10 10 10 10 10
38048 - 10 10 10 6 6 6 6 6 6 6 6 6
38049 - 0 0 0 0 0 0 0 0 0 0 0 0
38050 - 0 0 0 0 0 0 0 0 0 0 0 0
38051 - 0 0 0 0 0 0 0 0 0 0 0 0
38052 - 0 0 0 0 0 0 0 0 0 0 0 0
38053 - 0 0 0 0 0 0 0 0 0 0 0 0
38054 - 0 0 0 0 0 0 0 0 0 0 0 0
38055 - 0 0 0 0 0 0 0 0 0 0 0 0
38056 - 0 0 0 0 0 0 0 0 0 0 0 0
38057 - 0 0 0 0 0 0 0 0 0 0 0 0
38058 - 0 0 0 0 0 0 0 0 0 0 0 0
38059 - 0 0 0 0 0 0 0 0 0 0 0 0
38060 - 0 0 0 0 0 0 0 0 0 0 0 0
38061 - 0 0 0 0 0 0 0 0 0 0 0 0
38062 - 0 0 0 0 0 0 0 0 0 0 0 0
38063 - 0 0 0 0 0 0 0 0 0 0 0 0
38064 - 0 0 0 0 0 0 0 0 0 0 0 0
38065 - 0 0 0 0 0 0 0 0 0 0 0 0
38066 - 0 0 0 6 6 6 10 10 10 14 14 14
38067 - 22 22 22 26 26 26 30 30 30 34 34 34
38068 - 30 30 30 30 30 30 26 26 26 18 18 18
38069 - 14 14 14 10 10 10 6 6 6 0 0 0
38070 - 0 0 0 0 0 0 0 0 0 0 0 0
38071 - 0 0 0 0 0 0 0 0 0 0 0 0
38072 - 0 0 0 0 0 0 0 0 0 0 0 0
38073 - 0 0 0 0 0 0 0 0 0 0 0 0
38074 - 0 0 0 0 0 0 0 0 0 0 0 0
38075 - 0 0 0 0 0 0 0 0 0 0 0 0
38076 - 0 0 0 0 0 0 0 0 0 0 0 0
38077 - 0 0 0 0 0 0 0 0 0 0 0 0
38078 - 0 0 0 0 0 0 0 0 0 0 0 0
38079 - 0 0 0 0 0 1 0 0 1 0 0 0
38080 - 0 0 0 0 0 0 0 0 0 0 0 0
38081 - 0 0 0 0 0 0 0 0 0 0 0 0
38082 - 0 0 0 0 0 0 0 0 0 0 0 0
38083 - 0 0 0 0 0 0 0 0 0 0 0 0
38084 - 0 0 0 0 0 0 0 0 0 0 0 0
38085 - 0 0 0 0 0 0 0 0 0 0 0 0
38086 - 6 6 6 14 14 14 26 26 26 42 42 42
38087 - 54 54 54 66 66 66 78 78 78 78 78 78
38088 - 78 78 78 74 74 74 66 66 66 54 54 54
38089 - 42 42 42 26 26 26 18 18 18 10 10 10
38090 - 6 6 6 0 0 0 0 0 0 0 0 0
38091 - 0 0 0 0 0 0 0 0 0 0 0 0
38092 - 0 0 0 0 0 0 0 0 0 0 0 0
38093 - 0 0 0 0 0 0 0 0 0 0 0 0
38094 - 0 0 0 0 0 0 0 0 0 0 0 0
38095 - 0 0 0 0 0 0 0 0 0 0 0 0
38096 - 0 0 0 0 0 0 0 0 0 0 0 0
38097 - 0 0 0 0 0 0 0 0 0 0 0 0
38098 - 0 0 0 0 0 0 0 0 0 0 0 0
38099 - 0 0 1 0 0 0 0 0 0 0 0 0
38100 - 0 0 0 0 0 0 0 0 0 0 0 0
38101 - 0 0 0 0 0 0 0 0 0 0 0 0
38102 - 0 0 0 0 0 0 0 0 0 0 0 0
38103 - 0 0 0 0 0 0 0 0 0 0 0 0
38104 - 0 0 0 0 0 0 0 0 0 0 0 0
38105 - 0 0 0 0 0 0 0 0 0 10 10 10
38106 - 22 22 22 42 42 42 66 66 66 86 86 86
38107 - 66 66 66 38 38 38 38 38 38 22 22 22
38108 - 26 26 26 34 34 34 54 54 54 66 66 66
38109 - 86 86 86 70 70 70 46 46 46 26 26 26
38110 - 14 14 14 6 6 6 0 0 0 0 0 0
38111 - 0 0 0 0 0 0 0 0 0 0 0 0
38112 - 0 0 0 0 0 0 0 0 0 0 0 0
38113 - 0 0 0 0 0 0 0 0 0 0 0 0
38114 - 0 0 0 0 0 0 0 0 0 0 0 0
38115 - 0 0 0 0 0 0 0 0 0 0 0 0
38116 - 0 0 0 0 0 0 0 0 0 0 0 0
38117 - 0 0 0 0 0 0 0 0 0 0 0 0
38118 - 0 0 0 0 0 0 0 0 0 0 0 0
38119 - 0 0 1 0 0 1 0 0 1 0 0 0
38120 - 0 0 0 0 0 0 0 0 0 0 0 0
38121 - 0 0 0 0 0 0 0 0 0 0 0 0
38122 - 0 0 0 0 0 0 0 0 0 0 0 0
38123 - 0 0 0 0 0 0 0 0 0 0 0 0
38124 - 0 0 0 0 0 0 0 0 0 0 0 0
38125 - 0 0 0 0 0 0 10 10 10 26 26 26
38126 - 50 50 50 82 82 82 58 58 58 6 6 6
38127 - 2 2 6 2 2 6 2 2 6 2 2 6
38128 - 2 2 6 2 2 6 2 2 6 2 2 6
38129 - 6 6 6 54 54 54 86 86 86 66 66 66
38130 - 38 38 38 18 18 18 6 6 6 0 0 0
38131 - 0 0 0 0 0 0 0 0 0 0 0 0
38132 - 0 0 0 0 0 0 0 0 0 0 0 0
38133 - 0 0 0 0 0 0 0 0 0 0 0 0
38134 - 0 0 0 0 0 0 0 0 0 0 0 0
38135 - 0 0 0 0 0 0 0 0 0 0 0 0
38136 - 0 0 0 0 0 0 0 0 0 0 0 0
38137 - 0 0 0 0 0 0 0 0 0 0 0 0
38138 - 0 0 0 0 0 0 0 0 0 0 0 0
38139 - 0 0 0 0 0 0 0 0 0 0 0 0
38140 - 0 0 0 0 0 0 0 0 0 0 0 0
38141 - 0 0 0 0 0 0 0 0 0 0 0 0
38142 - 0 0 0 0 0 0 0 0 0 0 0 0
38143 - 0 0 0 0 0 0 0 0 0 0 0 0
38144 - 0 0 0 0 0 0 0 0 0 0 0 0
38145 - 0 0 0 6 6 6 22 22 22 50 50 50
38146 - 78 78 78 34 34 34 2 2 6 2 2 6
38147 - 2 2 6 2 2 6 2 2 6 2 2 6
38148 - 2 2 6 2 2 6 2 2 6 2 2 6
38149 - 2 2 6 2 2 6 6 6 6 70 70 70
38150 - 78 78 78 46 46 46 22 22 22 6 6 6
38151 - 0 0 0 0 0 0 0 0 0 0 0 0
38152 - 0 0 0 0 0 0 0 0 0 0 0 0
38153 - 0 0 0 0 0 0 0 0 0 0 0 0
38154 - 0 0 0 0 0 0 0 0 0 0 0 0
38155 - 0 0 0 0 0 0 0 0 0 0 0 0
38156 - 0 0 0 0 0 0 0 0 0 0 0 0
38157 - 0 0 0 0 0 0 0 0 0 0 0 0
38158 - 0 0 0 0 0 0 0 0 0 0 0 0
38159 - 0 0 1 0 0 1 0 0 1 0 0 0
38160 - 0 0 0 0 0 0 0 0 0 0 0 0
38161 - 0 0 0 0 0 0 0 0 0 0 0 0
38162 - 0 0 0 0 0 0 0 0 0 0 0 0
38163 - 0 0 0 0 0 0 0 0 0 0 0 0
38164 - 0 0 0 0 0 0 0 0 0 0 0 0
38165 - 6 6 6 18 18 18 42 42 42 82 82 82
38166 - 26 26 26 2 2 6 2 2 6 2 2 6
38167 - 2 2 6 2 2 6 2 2 6 2 2 6
38168 - 2 2 6 2 2 6 2 2 6 14 14 14
38169 - 46 46 46 34 34 34 6 6 6 2 2 6
38170 - 42 42 42 78 78 78 42 42 42 18 18 18
38171 - 6 6 6 0 0 0 0 0 0 0 0 0
38172 - 0 0 0 0 0 0 0 0 0 0 0 0
38173 - 0 0 0 0 0 0 0 0 0 0 0 0
38174 - 0 0 0 0 0 0 0 0 0 0 0 0
38175 - 0 0 0 0 0 0 0 0 0 0 0 0
38176 - 0 0 0 0 0 0 0 0 0 0 0 0
38177 - 0 0 0 0 0 0 0 0 0 0 0 0
38178 - 0 0 0 0 0 0 0 0 0 0 0 0
38179 - 0 0 1 0 0 0 0 0 1 0 0 0
38180 - 0 0 0 0 0 0 0 0 0 0 0 0
38181 - 0 0 0 0 0 0 0 0 0 0 0 0
38182 - 0 0 0 0 0 0 0 0 0 0 0 0
38183 - 0 0 0 0 0 0 0 0 0 0 0 0
38184 - 0 0 0 0 0 0 0 0 0 0 0 0
38185 - 10 10 10 30 30 30 66 66 66 58 58 58
38186 - 2 2 6 2 2 6 2 2 6 2 2 6
38187 - 2 2 6 2 2 6 2 2 6 2 2 6
38188 - 2 2 6 2 2 6 2 2 6 26 26 26
38189 - 86 86 86 101 101 101 46 46 46 10 10 10
38190 - 2 2 6 58 58 58 70 70 70 34 34 34
38191 - 10 10 10 0 0 0 0 0 0 0 0 0
38192 - 0 0 0 0 0 0 0 0 0 0 0 0
38193 - 0 0 0 0 0 0 0 0 0 0 0 0
38194 - 0 0 0 0 0 0 0 0 0 0 0 0
38195 - 0 0 0 0 0 0 0 0 0 0 0 0
38196 - 0 0 0 0 0 0 0 0 0 0 0 0
38197 - 0 0 0 0 0 0 0 0 0 0 0 0
38198 - 0 0 0 0 0 0 0 0 0 0 0 0
38199 - 0 0 1 0 0 1 0 0 1 0 0 0
38200 - 0 0 0 0 0 0 0 0 0 0 0 0
38201 - 0 0 0 0 0 0 0 0 0 0 0 0
38202 - 0 0 0 0 0 0 0 0 0 0 0 0
38203 - 0 0 0 0 0 0 0 0 0 0 0 0
38204 - 0 0 0 0 0 0 0 0 0 0 0 0
38205 - 14 14 14 42 42 42 86 86 86 10 10 10
38206 - 2 2 6 2 2 6 2 2 6 2 2 6
38207 - 2 2 6 2 2 6 2 2 6 2 2 6
38208 - 2 2 6 2 2 6 2 2 6 30 30 30
38209 - 94 94 94 94 94 94 58 58 58 26 26 26
38210 - 2 2 6 6 6 6 78 78 78 54 54 54
38211 - 22 22 22 6 6 6 0 0 0 0 0 0
38212 - 0 0 0 0 0 0 0 0 0 0 0 0
38213 - 0 0 0 0 0 0 0 0 0 0 0 0
38214 - 0 0 0 0 0 0 0 0 0 0 0 0
38215 - 0 0 0 0 0 0 0 0 0 0 0 0
38216 - 0 0 0 0 0 0 0 0 0 0 0 0
38217 - 0 0 0 0 0 0 0 0 0 0 0 0
38218 - 0 0 0 0 0 0 0 0 0 0 0 0
38219 - 0 0 0 0 0 0 0 0 0 0 0 0
38220 - 0 0 0 0 0 0 0 0 0 0 0 0
38221 - 0 0 0 0 0 0 0 0 0 0 0 0
38222 - 0 0 0 0 0 0 0 0 0 0 0 0
38223 - 0 0 0 0 0 0 0 0 0 0 0 0
38224 - 0 0 0 0 0 0 0 0 0 6 6 6
38225 - 22 22 22 62 62 62 62 62 62 2 2 6
38226 - 2 2 6 2 2 6 2 2 6 2 2 6
38227 - 2 2 6 2 2 6 2 2 6 2 2 6
38228 - 2 2 6 2 2 6 2 2 6 26 26 26
38229 - 54 54 54 38 38 38 18 18 18 10 10 10
38230 - 2 2 6 2 2 6 34 34 34 82 82 82
38231 - 38 38 38 14 14 14 0 0 0 0 0 0
38232 - 0 0 0 0 0 0 0 0 0 0 0 0
38233 - 0 0 0 0 0 0 0 0 0 0 0 0
38234 - 0 0 0 0 0 0 0 0 0 0 0 0
38235 - 0 0 0 0 0 0 0 0 0 0 0 0
38236 - 0 0 0 0 0 0 0 0 0 0 0 0
38237 - 0 0 0 0 0 0 0 0 0 0 0 0
38238 - 0 0 0 0 0 0 0 0 0 0 0 0
38239 - 0 0 0 0 0 1 0 0 1 0 0 0
38240 - 0 0 0 0 0 0 0 0 0 0 0 0
38241 - 0 0 0 0 0 0 0 0 0 0 0 0
38242 - 0 0 0 0 0 0 0 0 0 0 0 0
38243 - 0 0 0 0 0 0 0 0 0 0 0 0
38244 - 0 0 0 0 0 0 0 0 0 6 6 6
38245 - 30 30 30 78 78 78 30 30 30 2 2 6
38246 - 2 2 6 2 2 6 2 2 6 2 2 6
38247 - 2 2 6 2 2 6 2 2 6 2 2 6
38248 - 2 2 6 2 2 6 2 2 6 10 10 10
38249 - 10 10 10 2 2 6 2 2 6 2 2 6
38250 - 2 2 6 2 2 6 2 2 6 78 78 78
38251 - 50 50 50 18 18 18 6 6 6 0 0 0
38252 - 0 0 0 0 0 0 0 0 0 0 0 0
38253 - 0 0 0 0 0 0 0 0 0 0 0 0
38254 - 0 0 0 0 0 0 0 0 0 0 0 0
38255 - 0 0 0 0 0 0 0 0 0 0 0 0
38256 - 0 0 0 0 0 0 0 0 0 0 0 0
38257 - 0 0 0 0 0 0 0 0 0 0 0 0
38258 - 0 0 0 0 0 0 0 0 0 0 0 0
38259 - 0 0 1 0 0 0 0 0 0 0 0 0
38260 - 0 0 0 0 0 0 0 0 0 0 0 0
38261 - 0 0 0 0 0 0 0 0 0 0 0 0
38262 - 0 0 0 0 0 0 0 0 0 0 0 0
38263 - 0 0 0 0 0 0 0 0 0 0 0 0
38264 - 0 0 0 0 0 0 0 0 0 10 10 10
38265 - 38 38 38 86 86 86 14 14 14 2 2 6
38266 - 2 2 6 2 2 6 2 2 6 2 2 6
38267 - 2 2 6 2 2 6 2 2 6 2 2 6
38268 - 2 2 6 2 2 6 2 2 6 2 2 6
38269 - 2 2 6 2 2 6 2 2 6 2 2 6
38270 - 2 2 6 2 2 6 2 2 6 54 54 54
38271 - 66 66 66 26 26 26 6 6 6 0 0 0
38272 - 0 0 0 0 0 0 0 0 0 0 0 0
38273 - 0 0 0 0 0 0 0 0 0 0 0 0
38274 - 0 0 0 0 0 0 0 0 0 0 0 0
38275 - 0 0 0 0 0 0 0 0 0 0 0 0
38276 - 0 0 0 0 0 0 0 0 0 0 0 0
38277 - 0 0 0 0 0 0 0 0 0 0 0 0
38278 - 0 0 0 0 0 0 0 0 0 0 0 0
38279 - 0 0 0 0 0 1 0 0 1 0 0 0
38280 - 0 0 0 0 0 0 0 0 0 0 0 0
38281 - 0 0 0 0 0 0 0 0 0 0 0 0
38282 - 0 0 0 0 0 0 0 0 0 0 0 0
38283 - 0 0 0 0 0 0 0 0 0 0 0 0
38284 - 0 0 0 0 0 0 0 0 0 14 14 14
38285 - 42 42 42 82 82 82 2 2 6 2 2 6
38286 - 2 2 6 6 6 6 10 10 10 2 2 6
38287 - 2 2 6 2 2 6 2 2 6 2 2 6
38288 - 2 2 6 2 2 6 2 2 6 6 6 6
38289 - 14 14 14 10 10 10 2 2 6 2 2 6
38290 - 2 2 6 2 2 6 2 2 6 18 18 18
38291 - 82 82 82 34 34 34 10 10 10 0 0 0
38292 - 0 0 0 0 0 0 0 0 0 0 0 0
38293 - 0 0 0 0 0 0 0 0 0 0 0 0
38294 - 0 0 0 0 0 0 0 0 0 0 0 0
38295 - 0 0 0 0 0 0 0 0 0 0 0 0
38296 - 0 0 0 0 0 0 0 0 0 0 0 0
38297 - 0 0 0 0 0 0 0 0 0 0 0 0
38298 - 0 0 0 0 0 0 0 0 0 0 0 0
38299 - 0 0 1 0 0 0 0 0 0 0 0 0
38300 - 0 0 0 0 0 0 0 0 0 0 0 0
38301 - 0 0 0 0 0 0 0 0 0 0 0 0
38302 - 0 0 0 0 0 0 0 0 0 0 0 0
38303 - 0 0 0 0 0 0 0 0 0 0 0 0
38304 - 0 0 0 0 0 0 0 0 0 14 14 14
38305 - 46 46 46 86 86 86 2 2 6 2 2 6
38306 - 6 6 6 6 6 6 22 22 22 34 34 34
38307 - 6 6 6 2 2 6 2 2 6 2 2 6
38308 - 2 2 6 2 2 6 18 18 18 34 34 34
38309 - 10 10 10 50 50 50 22 22 22 2 2 6
38310 - 2 2 6 2 2 6 2 2 6 10 10 10
38311 - 86 86 86 42 42 42 14 14 14 0 0 0
38312 - 0 0 0 0 0 0 0 0 0 0 0 0
38313 - 0 0 0 0 0 0 0 0 0 0 0 0
38314 - 0 0 0 0 0 0 0 0 0 0 0 0
38315 - 0 0 0 0 0 0 0 0 0 0 0 0
38316 - 0 0 0 0 0 0 0 0 0 0 0 0
38317 - 0 0 0 0 0 0 0 0 0 0 0 0
38318 - 0 0 0 0 0 0 0 0 0 0 0 0
38319 - 0 0 1 0 0 1 0 0 1 0 0 0
38320 - 0 0 0 0 0 0 0 0 0 0 0 0
38321 - 0 0 0 0 0 0 0 0 0 0 0 0
38322 - 0 0 0 0 0 0 0 0 0 0 0 0
38323 - 0 0 0 0 0 0 0 0 0 0 0 0
38324 - 0 0 0 0 0 0 0 0 0 14 14 14
38325 - 46 46 46 86 86 86 2 2 6 2 2 6
38326 - 38 38 38 116 116 116 94 94 94 22 22 22
38327 - 22 22 22 2 2 6 2 2 6 2 2 6
38328 - 14 14 14 86 86 86 138 138 138 162 162 162
38329 -154 154 154 38 38 38 26 26 26 6 6 6
38330 - 2 2 6 2 2 6 2 2 6 2 2 6
38331 - 86 86 86 46 46 46 14 14 14 0 0 0
38332 - 0 0 0 0 0 0 0 0 0 0 0 0
38333 - 0 0 0 0 0 0 0 0 0 0 0 0
38334 - 0 0 0 0 0 0 0 0 0 0 0 0
38335 - 0 0 0 0 0 0 0 0 0 0 0 0
38336 - 0 0 0 0 0 0 0 0 0 0 0 0
38337 - 0 0 0 0 0 0 0 0 0 0 0 0
38338 - 0 0 0 0 0 0 0 0 0 0 0 0
38339 - 0 0 0 0 0 0 0 0 0 0 0 0
38340 - 0 0 0 0 0 0 0 0 0 0 0 0
38341 - 0 0 0 0 0 0 0 0 0 0 0 0
38342 - 0 0 0 0 0 0 0 0 0 0 0 0
38343 - 0 0 0 0 0 0 0 0 0 0 0 0
38344 - 0 0 0 0 0 0 0 0 0 14 14 14
38345 - 46 46 46 86 86 86 2 2 6 14 14 14
38346 -134 134 134 198 198 198 195 195 195 116 116 116
38347 - 10 10 10 2 2 6 2 2 6 6 6 6
38348 -101 98 89 187 187 187 210 210 210 218 218 218
38349 -214 214 214 134 134 134 14 14 14 6 6 6
38350 - 2 2 6 2 2 6 2 2 6 2 2 6
38351 - 86 86 86 50 50 50 18 18 18 6 6 6
38352 - 0 0 0 0 0 0 0 0 0 0 0 0
38353 - 0 0 0 0 0 0 0 0 0 0 0 0
38354 - 0 0 0 0 0 0 0 0 0 0 0 0
38355 - 0 0 0 0 0 0 0 0 0 0 0 0
38356 - 0 0 0 0 0 0 0 0 0 0 0 0
38357 - 0 0 0 0 0 0 0 0 0 0 0 0
38358 - 0 0 0 0 0 0 0 0 1 0 0 0
38359 - 0 0 1 0 0 1 0 0 1 0 0 0
38360 - 0 0 0 0 0 0 0 0 0 0 0 0
38361 - 0 0 0 0 0 0 0 0 0 0 0 0
38362 - 0 0 0 0 0 0 0 0 0 0 0 0
38363 - 0 0 0 0 0 0 0 0 0 0 0 0
38364 - 0 0 0 0 0 0 0 0 0 14 14 14
38365 - 46 46 46 86 86 86 2 2 6 54 54 54
38366 -218 218 218 195 195 195 226 226 226 246 246 246
38367 - 58 58 58 2 2 6 2 2 6 30 30 30
38368 -210 210 210 253 253 253 174 174 174 123 123 123
38369 -221 221 221 234 234 234 74 74 74 2 2 6
38370 - 2 2 6 2 2 6 2 2 6 2 2 6
38371 - 70 70 70 58 58 58 22 22 22 6 6 6
38372 - 0 0 0 0 0 0 0 0 0 0 0 0
38373 - 0 0 0 0 0 0 0 0 0 0 0 0
38374 - 0 0 0 0 0 0 0 0 0 0 0 0
38375 - 0 0 0 0 0 0 0 0 0 0 0 0
38376 - 0 0 0 0 0 0 0 0 0 0 0 0
38377 - 0 0 0 0 0 0 0 0 0 0 0 0
38378 - 0 0 0 0 0 0 0 0 0 0 0 0
38379 - 0 0 0 0 0 0 0 0 0 0 0 0
38380 - 0 0 0 0 0 0 0 0 0 0 0 0
38381 - 0 0 0 0 0 0 0 0 0 0 0 0
38382 - 0 0 0 0 0 0 0 0 0 0 0 0
38383 - 0 0 0 0 0 0 0 0 0 0 0 0
38384 - 0 0 0 0 0 0 0 0 0 14 14 14
38385 - 46 46 46 82 82 82 2 2 6 106 106 106
38386 -170 170 170 26 26 26 86 86 86 226 226 226
38387 -123 123 123 10 10 10 14 14 14 46 46 46
38388 -231 231 231 190 190 190 6 6 6 70 70 70
38389 - 90 90 90 238 238 238 158 158 158 2 2 6
38390 - 2 2 6 2 2 6 2 2 6 2 2 6
38391 - 70 70 70 58 58 58 22 22 22 6 6 6
38392 - 0 0 0 0 0 0 0 0 0 0 0 0
38393 - 0 0 0 0 0 0 0 0 0 0 0 0
38394 - 0 0 0 0 0 0 0 0 0 0 0 0
38395 - 0 0 0 0 0 0 0 0 0 0 0 0
38396 - 0 0 0 0 0 0 0 0 0 0 0 0
38397 - 0 0 0 0 0 0 0 0 0 0 0 0
38398 - 0 0 0 0 0 0 0 0 1 0 0 0
38399 - 0 0 1 0 0 1 0 0 1 0 0 0
38400 - 0 0 0 0 0 0 0 0 0 0 0 0
38401 - 0 0 0 0 0 0 0 0 0 0 0 0
38402 - 0 0 0 0 0 0 0 0 0 0 0 0
38403 - 0 0 0 0 0 0 0 0 0 0 0 0
38404 - 0 0 0 0 0 0 0 0 0 14 14 14
38405 - 42 42 42 86 86 86 6 6 6 116 116 116
38406 -106 106 106 6 6 6 70 70 70 149 149 149
38407 -128 128 128 18 18 18 38 38 38 54 54 54
38408 -221 221 221 106 106 106 2 2 6 14 14 14
38409 - 46 46 46 190 190 190 198 198 198 2 2 6
38410 - 2 2 6 2 2 6 2 2 6 2 2 6
38411 - 74 74 74 62 62 62 22 22 22 6 6 6
38412 - 0 0 0 0 0 0 0 0 0 0 0 0
38413 - 0 0 0 0 0 0 0 0 0 0 0 0
38414 - 0 0 0 0 0 0 0 0 0 0 0 0
38415 - 0 0 0 0 0 0 0 0 0 0 0 0
38416 - 0 0 0 0 0 0 0 0 0 0 0 0
38417 - 0 0 0 0 0 0 0 0 0 0 0 0
38418 - 0 0 0 0 0 0 0 0 1 0 0 0
38419 - 0 0 1 0 0 0 0 0 1 0 0 0
38420 - 0 0 0 0 0 0 0 0 0 0 0 0
38421 - 0 0 0 0 0 0 0 0 0 0 0 0
38422 - 0 0 0 0 0 0 0 0 0 0 0 0
38423 - 0 0 0 0 0 0 0 0 0 0 0 0
38424 - 0 0 0 0 0 0 0 0 0 14 14 14
38425 - 42 42 42 94 94 94 14 14 14 101 101 101
38426 -128 128 128 2 2 6 18 18 18 116 116 116
38427 -118 98 46 121 92 8 121 92 8 98 78 10
38428 -162 162 162 106 106 106 2 2 6 2 2 6
38429 - 2 2 6 195 195 195 195 195 195 6 6 6
38430 - 2 2 6 2 2 6 2 2 6 2 2 6
38431 - 74 74 74 62 62 62 22 22 22 6 6 6
38432 - 0 0 0 0 0 0 0 0 0 0 0 0
38433 - 0 0 0 0 0 0 0 0 0 0 0 0
38434 - 0 0 0 0 0 0 0 0 0 0 0 0
38435 - 0 0 0 0 0 0 0 0 0 0 0 0
38436 - 0 0 0 0 0 0 0 0 0 0 0 0
38437 - 0 0 0 0 0 0 0 0 0 0 0 0
38438 - 0 0 0 0 0 0 0 0 1 0 0 1
38439 - 0 0 1 0 0 0 0 0 1 0 0 0
38440 - 0 0 0 0 0 0 0 0 0 0 0 0
38441 - 0 0 0 0 0 0 0 0 0 0 0 0
38442 - 0 0 0 0 0 0 0 0 0 0 0 0
38443 - 0 0 0 0 0 0 0 0 0 0 0 0
38444 - 0 0 0 0 0 0 0 0 0 10 10 10
38445 - 38 38 38 90 90 90 14 14 14 58 58 58
38446 -210 210 210 26 26 26 54 38 6 154 114 10
38447 -226 170 11 236 186 11 225 175 15 184 144 12
38448 -215 174 15 175 146 61 37 26 9 2 2 6
38449 - 70 70 70 246 246 246 138 138 138 2 2 6
38450 - 2 2 6 2 2 6 2 2 6 2 2 6
38451 - 70 70 70 66 66 66 26 26 26 6 6 6
38452 - 0 0 0 0 0 0 0 0 0 0 0 0
38453 - 0 0 0 0 0 0 0 0 0 0 0 0
38454 - 0 0 0 0 0 0 0 0 0 0 0 0
38455 - 0 0 0 0 0 0 0 0 0 0 0 0
38456 - 0 0 0 0 0 0 0 0 0 0 0 0
38457 - 0 0 0 0 0 0 0 0 0 0 0 0
38458 - 0 0 0 0 0 0 0 0 0 0 0 0
38459 - 0 0 0 0 0 0 0 0 0 0 0 0
38460 - 0 0 0 0 0 0 0 0 0 0 0 0
38461 - 0 0 0 0 0 0 0 0 0 0 0 0
38462 - 0 0 0 0 0 0 0 0 0 0 0 0
38463 - 0 0 0 0 0 0 0 0 0 0 0 0
38464 - 0 0 0 0 0 0 0 0 0 10 10 10
38465 - 38 38 38 86 86 86 14 14 14 10 10 10
38466 -195 195 195 188 164 115 192 133 9 225 175 15
38467 -239 182 13 234 190 10 232 195 16 232 200 30
38468 -245 207 45 241 208 19 232 195 16 184 144 12
38469 -218 194 134 211 206 186 42 42 42 2 2 6
38470 - 2 2 6 2 2 6 2 2 6 2 2 6
38471 - 50 50 50 74 74 74 30 30 30 6 6 6
38472 - 0 0 0 0 0 0 0 0 0 0 0 0
38473 - 0 0 0 0 0 0 0 0 0 0 0 0
38474 - 0 0 0 0 0 0 0 0 0 0 0 0
38475 - 0 0 0 0 0 0 0 0 0 0 0 0
38476 - 0 0 0 0 0 0 0 0 0 0 0 0
38477 - 0 0 0 0 0 0 0 0 0 0 0 0
38478 - 0 0 0 0 0 0 0 0 0 0 0 0
38479 - 0 0 0 0 0 0 0 0 0 0 0 0
38480 - 0 0 0 0 0 0 0 0 0 0 0 0
38481 - 0 0 0 0 0 0 0 0 0 0 0 0
38482 - 0 0 0 0 0 0 0 0 0 0 0 0
38483 - 0 0 0 0 0 0 0 0 0 0 0 0
38484 - 0 0 0 0 0 0 0 0 0 10 10 10
38485 - 34 34 34 86 86 86 14 14 14 2 2 6
38486 -121 87 25 192 133 9 219 162 10 239 182 13
38487 -236 186 11 232 195 16 241 208 19 244 214 54
38488 -246 218 60 246 218 38 246 215 20 241 208 19
38489 -241 208 19 226 184 13 121 87 25 2 2 6
38490 - 2 2 6 2 2 6 2 2 6 2 2 6
38491 - 50 50 50 82 82 82 34 34 34 10 10 10
38492 - 0 0 0 0 0 0 0 0 0 0 0 0
38493 - 0 0 0 0 0 0 0 0 0 0 0 0
38494 - 0 0 0 0 0 0 0 0 0 0 0 0
38495 - 0 0 0 0 0 0 0 0 0 0 0 0
38496 - 0 0 0 0 0 0 0 0 0 0 0 0
38497 - 0 0 0 0 0 0 0 0 0 0 0 0
38498 - 0 0 0 0 0 0 0 0 0 0 0 0
38499 - 0 0 0 0 0 0 0 0 0 0 0 0
38500 - 0 0 0 0 0 0 0 0 0 0 0 0
38501 - 0 0 0 0 0 0 0 0 0 0 0 0
38502 - 0 0 0 0 0 0 0 0 0 0 0 0
38503 - 0 0 0 0 0 0 0 0 0 0 0 0
38504 - 0 0 0 0 0 0 0 0 0 10 10 10
38505 - 34 34 34 82 82 82 30 30 30 61 42 6
38506 -180 123 7 206 145 10 230 174 11 239 182 13
38507 -234 190 10 238 202 15 241 208 19 246 218 74
38508 -246 218 38 246 215 20 246 215 20 246 215 20
38509 -226 184 13 215 174 15 184 144 12 6 6 6
38510 - 2 2 6 2 2 6 2 2 6 2 2 6
38511 - 26 26 26 94 94 94 42 42 42 14 14 14
38512 - 0 0 0 0 0 0 0 0 0 0 0 0
38513 - 0 0 0 0 0 0 0 0 0 0 0 0
38514 - 0 0 0 0 0 0 0 0 0 0 0 0
38515 - 0 0 0 0 0 0 0 0 0 0 0 0
38516 - 0 0 0 0 0 0 0 0 0 0 0 0
38517 - 0 0 0 0 0 0 0 0 0 0 0 0
38518 - 0 0 0 0 0 0 0 0 0 0 0 0
38519 - 0 0 0 0 0 0 0 0 0 0 0 0
38520 - 0 0 0 0 0 0 0 0 0 0 0 0
38521 - 0 0 0 0 0 0 0 0 0 0 0 0
38522 - 0 0 0 0 0 0 0 0 0 0 0 0
38523 - 0 0 0 0 0 0 0 0 0 0 0 0
38524 - 0 0 0 0 0 0 0 0 0 10 10 10
38525 - 30 30 30 78 78 78 50 50 50 104 69 6
38526 -192 133 9 216 158 10 236 178 12 236 186 11
38527 -232 195 16 241 208 19 244 214 54 245 215 43
38528 -246 215 20 246 215 20 241 208 19 198 155 10
38529 -200 144 11 216 158 10 156 118 10 2 2 6
38530 - 2 2 6 2 2 6 2 2 6 2 2 6
38531 - 6 6 6 90 90 90 54 54 54 18 18 18
38532 - 6 6 6 0 0 0 0 0 0 0 0 0
38533 - 0 0 0 0 0 0 0 0 0 0 0 0
38534 - 0 0 0 0 0 0 0 0 0 0 0 0
38535 - 0 0 0 0 0 0 0 0 0 0 0 0
38536 - 0 0 0 0 0 0 0 0 0 0 0 0
38537 - 0 0 0 0 0 0 0 0 0 0 0 0
38538 - 0 0 0 0 0 0 0 0 0 0 0 0
38539 - 0 0 0 0 0 0 0 0 0 0 0 0
38540 - 0 0 0 0 0 0 0 0 0 0 0 0
38541 - 0 0 0 0 0 0 0 0 0 0 0 0
38542 - 0 0 0 0 0 0 0 0 0 0 0 0
38543 - 0 0 0 0 0 0 0 0 0 0 0 0
38544 - 0 0 0 0 0 0 0 0 0 10 10 10
38545 - 30 30 30 78 78 78 46 46 46 22 22 22
38546 -137 92 6 210 162 10 239 182 13 238 190 10
38547 -238 202 15 241 208 19 246 215 20 246 215 20
38548 -241 208 19 203 166 17 185 133 11 210 150 10
38549 -216 158 10 210 150 10 102 78 10 2 2 6
38550 - 6 6 6 54 54 54 14 14 14 2 2 6
38551 - 2 2 6 62 62 62 74 74 74 30 30 30
38552 - 10 10 10 0 0 0 0 0 0 0 0 0
38553 - 0 0 0 0 0 0 0 0 0 0 0 0
38554 - 0 0 0 0 0 0 0 0 0 0 0 0
38555 - 0 0 0 0 0 0 0 0 0 0 0 0
38556 - 0 0 0 0 0 0 0 0 0 0 0 0
38557 - 0 0 0 0 0 0 0 0 0 0 0 0
38558 - 0 0 0 0 0 0 0 0 0 0 0 0
38559 - 0 0 0 0 0 0 0 0 0 0 0 0
38560 - 0 0 0 0 0 0 0 0 0 0 0 0
38561 - 0 0 0 0 0 0 0 0 0 0 0 0
38562 - 0 0 0 0 0 0 0 0 0 0 0 0
38563 - 0 0 0 0 0 0 0 0 0 0 0 0
38564 - 0 0 0 0 0 0 0 0 0 10 10 10
38565 - 34 34 34 78 78 78 50 50 50 6 6 6
38566 - 94 70 30 139 102 15 190 146 13 226 184 13
38567 -232 200 30 232 195 16 215 174 15 190 146 13
38568 -168 122 10 192 133 9 210 150 10 213 154 11
38569 -202 150 34 182 157 106 101 98 89 2 2 6
38570 - 2 2 6 78 78 78 116 116 116 58 58 58
38571 - 2 2 6 22 22 22 90 90 90 46 46 46
38572 - 18 18 18 6 6 6 0 0 0 0 0 0
38573 - 0 0 0 0 0 0 0 0 0 0 0 0
38574 - 0 0 0 0 0 0 0 0 0 0 0 0
38575 - 0 0 0 0 0 0 0 0 0 0 0 0
38576 - 0 0 0 0 0 0 0 0 0 0 0 0
38577 - 0 0 0 0 0 0 0 0 0 0 0 0
38578 - 0 0 0 0 0 0 0 0 0 0 0 0
38579 - 0 0 0 0 0 0 0 0 0 0 0 0
38580 - 0 0 0 0 0 0 0 0 0 0 0 0
38581 - 0 0 0 0 0 0 0 0 0 0 0 0
38582 - 0 0 0 0 0 0 0 0 0 0 0 0
38583 - 0 0 0 0 0 0 0 0 0 0 0 0
38584 - 0 0 0 0 0 0 0 0 0 10 10 10
38585 - 38 38 38 86 86 86 50 50 50 6 6 6
38586 -128 128 128 174 154 114 156 107 11 168 122 10
38587 -198 155 10 184 144 12 197 138 11 200 144 11
38588 -206 145 10 206 145 10 197 138 11 188 164 115
38589 -195 195 195 198 198 198 174 174 174 14 14 14
38590 - 2 2 6 22 22 22 116 116 116 116 116 116
38591 - 22 22 22 2 2 6 74 74 74 70 70 70
38592 - 30 30 30 10 10 10 0 0 0 0 0 0
38593 - 0 0 0 0 0 0 0 0 0 0 0 0
38594 - 0 0 0 0 0 0 0 0 0 0 0 0
38595 - 0 0 0 0 0 0 0 0 0 0 0 0
38596 - 0 0 0 0 0 0 0 0 0 0 0 0
38597 - 0 0 0 0 0 0 0 0 0 0 0 0
38598 - 0 0 0 0 0 0 0 0 0 0 0 0
38599 - 0 0 0 0 0 0 0 0 0 0 0 0
38600 - 0 0 0 0 0 0 0 0 0 0 0 0
38601 - 0 0 0 0 0 0 0 0 0 0 0 0
38602 - 0 0 0 0 0 0 0 0 0 0 0 0
38603 - 0 0 0 0 0 0 0 0 0 0 0 0
38604 - 0 0 0 0 0 0 6 6 6 18 18 18
38605 - 50 50 50 101 101 101 26 26 26 10 10 10
38606 -138 138 138 190 190 190 174 154 114 156 107 11
38607 -197 138 11 200 144 11 197 138 11 192 133 9
38608 -180 123 7 190 142 34 190 178 144 187 187 187
38609 -202 202 202 221 221 221 214 214 214 66 66 66
38610 - 2 2 6 2 2 6 50 50 50 62 62 62
38611 - 6 6 6 2 2 6 10 10 10 90 90 90
38612 - 50 50 50 18 18 18 6 6 6 0 0 0
38613 - 0 0 0 0 0 0 0 0 0 0 0 0
38614 - 0 0 0 0 0 0 0 0 0 0 0 0
38615 - 0 0 0 0 0 0 0 0 0 0 0 0
38616 - 0 0 0 0 0 0 0 0 0 0 0 0
38617 - 0 0 0 0 0 0 0 0 0 0 0 0
38618 - 0 0 0 0 0 0 0 0 0 0 0 0
38619 - 0 0 0 0 0 0 0 0 0 0 0 0
38620 - 0 0 0 0 0 0 0 0 0 0 0 0
38621 - 0 0 0 0 0 0 0 0 0 0 0 0
38622 - 0 0 0 0 0 0 0 0 0 0 0 0
38623 - 0 0 0 0 0 0 0 0 0 0 0 0
38624 - 0 0 0 0 0 0 10 10 10 34 34 34
38625 - 74 74 74 74 74 74 2 2 6 6 6 6
38626 -144 144 144 198 198 198 190 190 190 178 166 146
38627 -154 121 60 156 107 11 156 107 11 168 124 44
38628 -174 154 114 187 187 187 190 190 190 210 210 210
38629 -246 246 246 253 253 253 253 253 253 182 182 182
38630 - 6 6 6 2 2 6 2 2 6 2 2 6
38631 - 2 2 6 2 2 6 2 2 6 62 62 62
38632 - 74 74 74 34 34 34 14 14 14 0 0 0
38633 - 0 0 0 0 0 0 0 0 0 0 0 0
38634 - 0 0 0 0 0 0 0 0 0 0 0 0
38635 - 0 0 0 0 0 0 0 0 0 0 0 0
38636 - 0 0 0 0 0 0 0 0 0 0 0 0
38637 - 0 0 0 0 0 0 0 0 0 0 0 0
38638 - 0 0 0 0 0 0 0 0 0 0 0 0
38639 - 0 0 0 0 0 0 0 0 0 0 0 0
38640 - 0 0 0 0 0 0 0 0 0 0 0 0
38641 - 0 0 0 0 0 0 0 0 0 0 0 0
38642 - 0 0 0 0 0 0 0 0 0 0 0 0
38643 - 0 0 0 0 0 0 0 0 0 0 0 0
38644 - 0 0 0 10 10 10 22 22 22 54 54 54
38645 - 94 94 94 18 18 18 2 2 6 46 46 46
38646 -234 234 234 221 221 221 190 190 190 190 190 190
38647 -190 190 190 187 187 187 187 187 187 190 190 190
38648 -190 190 190 195 195 195 214 214 214 242 242 242
38649 -253 253 253 253 253 253 253 253 253 253 253 253
38650 - 82 82 82 2 2 6 2 2 6 2 2 6
38651 - 2 2 6 2 2 6 2 2 6 14 14 14
38652 - 86 86 86 54 54 54 22 22 22 6 6 6
38653 - 0 0 0 0 0 0 0 0 0 0 0 0
38654 - 0 0 0 0 0 0 0 0 0 0 0 0
38655 - 0 0 0 0 0 0 0 0 0 0 0 0
38656 - 0 0 0 0 0 0 0 0 0 0 0 0
38657 - 0 0 0 0 0 0 0 0 0 0 0 0
38658 - 0 0 0 0 0 0 0 0 0 0 0 0
38659 - 0 0 0 0 0 0 0 0 0 0 0 0
38660 - 0 0 0 0 0 0 0 0 0 0 0 0
38661 - 0 0 0 0 0 0 0 0 0 0 0 0
38662 - 0 0 0 0 0 0 0 0 0 0 0 0
38663 - 0 0 0 0 0 0 0 0 0 0 0 0
38664 - 6 6 6 18 18 18 46 46 46 90 90 90
38665 - 46 46 46 18 18 18 6 6 6 182 182 182
38666 -253 253 253 246 246 246 206 206 206 190 190 190
38667 -190 190 190 190 190 190 190 190 190 190 190 190
38668 -206 206 206 231 231 231 250 250 250 253 253 253
38669 -253 253 253 253 253 253 253 253 253 253 253 253
38670 -202 202 202 14 14 14 2 2 6 2 2 6
38671 - 2 2 6 2 2 6 2 2 6 2 2 6
38672 - 42 42 42 86 86 86 42 42 42 18 18 18
38673 - 6 6 6 0 0 0 0 0 0 0 0 0
38674 - 0 0 0 0 0 0 0 0 0 0 0 0
38675 - 0 0 0 0 0 0 0 0 0 0 0 0
38676 - 0 0 0 0 0 0 0 0 0 0 0 0
38677 - 0 0 0 0 0 0 0 0 0 0 0 0
38678 - 0 0 0 0 0 0 0 0 0 0 0 0
38679 - 0 0 0 0 0 0 0 0 0 0 0 0
38680 - 0 0 0 0 0 0 0 0 0 0 0 0
38681 - 0 0 0 0 0 0 0 0 0 0 0 0
38682 - 0 0 0 0 0 0 0 0 0 0 0 0
38683 - 0 0 0 0 0 0 0 0 0 6 6 6
38684 - 14 14 14 38 38 38 74 74 74 66 66 66
38685 - 2 2 6 6 6 6 90 90 90 250 250 250
38686 -253 253 253 253 253 253 238 238 238 198 198 198
38687 -190 190 190 190 190 190 195 195 195 221 221 221
38688 -246 246 246 253 253 253 253 253 253 253 253 253
38689 -253 253 253 253 253 253 253 253 253 253 253 253
38690 -253 253 253 82 82 82 2 2 6 2 2 6
38691 - 2 2 6 2 2 6 2 2 6 2 2 6
38692 - 2 2 6 78 78 78 70 70 70 34 34 34
38693 - 14 14 14 6 6 6 0 0 0 0 0 0
38694 - 0 0 0 0 0 0 0 0 0 0 0 0
38695 - 0 0 0 0 0 0 0 0 0 0 0 0
38696 - 0 0 0 0 0 0 0 0 0 0 0 0
38697 - 0 0 0 0 0 0 0 0 0 0 0 0
38698 - 0 0 0 0 0 0 0 0 0 0 0 0
38699 - 0 0 0 0 0 0 0 0 0 0 0 0
38700 - 0 0 0 0 0 0 0 0 0 0 0 0
38701 - 0 0 0 0 0 0 0 0 0 0 0 0
38702 - 0 0 0 0 0 0 0 0 0 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 14 14 14
38704 - 34 34 34 66 66 66 78 78 78 6 6 6
38705 - 2 2 6 18 18 18 218 218 218 253 253 253
38706 -253 253 253 253 253 253 253 253 253 246 246 246
38707 -226 226 226 231 231 231 246 246 246 253 253 253
38708 -253 253 253 253 253 253 253 253 253 253 253 253
38709 -253 253 253 253 253 253 253 253 253 253 253 253
38710 -253 253 253 178 178 178 2 2 6 2 2 6
38711 - 2 2 6 2 2 6 2 2 6 2 2 6
38712 - 2 2 6 18 18 18 90 90 90 62 62 62
38713 - 30 30 30 10 10 10 0 0 0 0 0 0
38714 - 0 0 0 0 0 0 0 0 0 0 0 0
38715 - 0 0 0 0 0 0 0 0 0 0 0 0
38716 - 0 0 0 0 0 0 0 0 0 0 0 0
38717 - 0 0 0 0 0 0 0 0 0 0 0 0
38718 - 0 0 0 0 0 0 0 0 0 0 0 0
38719 - 0 0 0 0 0 0 0 0 0 0 0 0
38720 - 0 0 0 0 0 0 0 0 0 0 0 0
38721 - 0 0 0 0 0 0 0 0 0 0 0 0
38722 - 0 0 0 0 0 0 0 0 0 0 0 0
38723 - 0 0 0 0 0 0 10 10 10 26 26 26
38724 - 58 58 58 90 90 90 18 18 18 2 2 6
38725 - 2 2 6 110 110 110 253 253 253 253 253 253
38726 -253 253 253 253 253 253 253 253 253 253 253 253
38727 -250 250 250 253 253 253 253 253 253 253 253 253
38728 -253 253 253 253 253 253 253 253 253 253 253 253
38729 -253 253 253 253 253 253 253 253 253 253 253 253
38730 -253 253 253 231 231 231 18 18 18 2 2 6
38731 - 2 2 6 2 2 6 2 2 6 2 2 6
38732 - 2 2 6 2 2 6 18 18 18 94 94 94
38733 - 54 54 54 26 26 26 10 10 10 0 0 0
38734 - 0 0 0 0 0 0 0 0 0 0 0 0
38735 - 0 0 0 0 0 0 0 0 0 0 0 0
38736 - 0 0 0 0 0 0 0 0 0 0 0 0
38737 - 0 0 0 0 0 0 0 0 0 0 0 0
38738 - 0 0 0 0 0 0 0 0 0 0 0 0
38739 - 0 0 0 0 0 0 0 0 0 0 0 0
38740 - 0 0 0 0 0 0 0 0 0 0 0 0
38741 - 0 0 0 0 0 0 0 0 0 0 0 0
38742 - 0 0 0 0 0 0 0 0 0 0 0 0
38743 - 0 0 0 6 6 6 22 22 22 50 50 50
38744 - 90 90 90 26 26 26 2 2 6 2 2 6
38745 - 14 14 14 195 195 195 250 250 250 253 253 253
38746 -253 253 253 253 253 253 253 253 253 253 253 253
38747 -253 253 253 253 253 253 253 253 253 253 253 253
38748 -253 253 253 253 253 253 253 253 253 253 253 253
38749 -253 253 253 253 253 253 253 253 253 253 253 253
38750 -250 250 250 242 242 242 54 54 54 2 2 6
38751 - 2 2 6 2 2 6 2 2 6 2 2 6
38752 - 2 2 6 2 2 6 2 2 6 38 38 38
38753 - 86 86 86 50 50 50 22 22 22 6 6 6
38754 - 0 0 0 0 0 0 0 0 0 0 0 0
38755 - 0 0 0 0 0 0 0 0 0 0 0 0
38756 - 0 0 0 0 0 0 0 0 0 0 0 0
38757 - 0 0 0 0 0 0 0 0 0 0 0 0
38758 - 0 0 0 0 0 0 0 0 0 0 0 0
38759 - 0 0 0 0 0 0 0 0 0 0 0 0
38760 - 0 0 0 0 0 0 0 0 0 0 0 0
38761 - 0 0 0 0 0 0 0 0 0 0 0 0
38762 - 0 0 0 0 0 0 0 0 0 0 0 0
38763 - 6 6 6 14 14 14 38 38 38 82 82 82
38764 - 34 34 34 2 2 6 2 2 6 2 2 6
38765 - 42 42 42 195 195 195 246 246 246 253 253 253
38766 -253 253 253 253 253 253 253 253 253 250 250 250
38767 -242 242 242 242 242 242 250 250 250 253 253 253
38768 -253 253 253 253 253 253 253 253 253 253 253 253
38769 -253 253 253 250 250 250 246 246 246 238 238 238
38770 -226 226 226 231 231 231 101 101 101 6 6 6
38771 - 2 2 6 2 2 6 2 2 6 2 2 6
38772 - 2 2 6 2 2 6 2 2 6 2 2 6
38773 - 38 38 38 82 82 82 42 42 42 14 14 14
38774 - 6 6 6 0 0 0 0 0 0 0 0 0
38775 - 0 0 0 0 0 0 0 0 0 0 0 0
38776 - 0 0 0 0 0 0 0 0 0 0 0 0
38777 - 0 0 0 0 0 0 0 0 0 0 0 0
38778 - 0 0 0 0 0 0 0 0 0 0 0 0
38779 - 0 0 0 0 0 0 0 0 0 0 0 0
38780 - 0 0 0 0 0 0 0 0 0 0 0 0
38781 - 0 0 0 0 0 0 0 0 0 0 0 0
38782 - 0 0 0 0 0 0 0 0 0 0 0 0
38783 - 10 10 10 26 26 26 62 62 62 66 66 66
38784 - 2 2 6 2 2 6 2 2 6 6 6 6
38785 - 70 70 70 170 170 170 206 206 206 234 234 234
38786 -246 246 246 250 250 250 250 250 250 238 238 238
38787 -226 226 226 231 231 231 238 238 238 250 250 250
38788 -250 250 250 250 250 250 246 246 246 231 231 231
38789 -214 214 214 206 206 206 202 202 202 202 202 202
38790 -198 198 198 202 202 202 182 182 182 18 18 18
38791 - 2 2 6 2 2 6 2 2 6 2 2 6
38792 - 2 2 6 2 2 6 2 2 6 2 2 6
38793 - 2 2 6 62 62 62 66 66 66 30 30 30
38794 - 10 10 10 0 0 0 0 0 0 0 0 0
38795 - 0 0 0 0 0 0 0 0 0 0 0 0
38796 - 0 0 0 0 0 0 0 0 0 0 0 0
38797 - 0 0 0 0 0 0 0 0 0 0 0 0
38798 - 0 0 0 0 0 0 0 0 0 0 0 0
38799 - 0 0 0 0 0 0 0 0 0 0 0 0
38800 - 0 0 0 0 0 0 0 0 0 0 0 0
38801 - 0 0 0 0 0 0 0 0 0 0 0 0
38802 - 0 0 0 0 0 0 0 0 0 0 0 0
38803 - 14 14 14 42 42 42 82 82 82 18 18 18
38804 - 2 2 6 2 2 6 2 2 6 10 10 10
38805 - 94 94 94 182 182 182 218 218 218 242 242 242
38806 -250 250 250 253 253 253 253 253 253 250 250 250
38807 -234 234 234 253 253 253 253 253 253 253 253 253
38808 -253 253 253 253 253 253 253 253 253 246 246 246
38809 -238 238 238 226 226 226 210 210 210 202 202 202
38810 -195 195 195 195 195 195 210 210 210 158 158 158
38811 - 6 6 6 14 14 14 50 50 50 14 14 14
38812 - 2 2 6 2 2 6 2 2 6 2 2 6
38813 - 2 2 6 6 6 6 86 86 86 46 46 46
38814 - 18 18 18 6 6 6 0 0 0 0 0 0
38815 - 0 0 0 0 0 0 0 0 0 0 0 0
38816 - 0 0 0 0 0 0 0 0 0 0 0 0
38817 - 0 0 0 0 0 0 0 0 0 0 0 0
38818 - 0 0 0 0 0 0 0 0 0 0 0 0
38819 - 0 0 0 0 0 0 0 0 0 0 0 0
38820 - 0 0 0 0 0 0 0 0 0 0 0 0
38821 - 0 0 0 0 0 0 0 0 0 0 0 0
38822 - 0 0 0 0 0 0 0 0 0 6 6 6
38823 - 22 22 22 54 54 54 70 70 70 2 2 6
38824 - 2 2 6 10 10 10 2 2 6 22 22 22
38825 -166 166 166 231 231 231 250 250 250 253 253 253
38826 -253 253 253 253 253 253 253 253 253 250 250 250
38827 -242 242 242 253 253 253 253 253 253 253 253 253
38828 -253 253 253 253 253 253 253 253 253 253 253 253
38829 -253 253 253 253 253 253 253 253 253 246 246 246
38830 -231 231 231 206 206 206 198 198 198 226 226 226
38831 - 94 94 94 2 2 6 6 6 6 38 38 38
38832 - 30 30 30 2 2 6 2 2 6 2 2 6
38833 - 2 2 6 2 2 6 62 62 62 66 66 66
38834 - 26 26 26 10 10 10 0 0 0 0 0 0
38835 - 0 0 0 0 0 0 0 0 0 0 0 0
38836 - 0 0 0 0 0 0 0 0 0 0 0 0
38837 - 0 0 0 0 0 0 0 0 0 0 0 0
38838 - 0 0 0 0 0 0 0 0 0 0 0 0
38839 - 0 0 0 0 0 0 0 0 0 0 0 0
38840 - 0 0 0 0 0 0 0 0 0 0 0 0
38841 - 0 0 0 0 0 0 0 0 0 0 0 0
38842 - 0 0 0 0 0 0 0 0 0 10 10 10
38843 - 30 30 30 74 74 74 50 50 50 2 2 6
38844 - 26 26 26 26 26 26 2 2 6 106 106 106
38845 -238 238 238 253 253 253 253 253 253 253 253 253
38846 -253 253 253 253 253 253 253 253 253 253 253 253
38847 -253 253 253 253 253 253 253 253 253 253 253 253
38848 -253 253 253 253 253 253 253 253 253 253 253 253
38849 -253 253 253 253 253 253 253 253 253 253 253 253
38850 -253 253 253 246 246 246 218 218 218 202 202 202
38851 -210 210 210 14 14 14 2 2 6 2 2 6
38852 - 30 30 30 22 22 22 2 2 6 2 2 6
38853 - 2 2 6 2 2 6 18 18 18 86 86 86
38854 - 42 42 42 14 14 14 0 0 0 0 0 0
38855 - 0 0 0 0 0 0 0 0 0 0 0 0
38856 - 0 0 0 0 0 0 0 0 0 0 0 0
38857 - 0 0 0 0 0 0 0 0 0 0 0 0
38858 - 0 0 0 0 0 0 0 0 0 0 0 0
38859 - 0 0 0 0 0 0 0 0 0 0 0 0
38860 - 0 0 0 0 0 0 0 0 0 0 0 0
38861 - 0 0 0 0 0 0 0 0 0 0 0 0
38862 - 0 0 0 0 0 0 0 0 0 14 14 14
38863 - 42 42 42 90 90 90 22 22 22 2 2 6
38864 - 42 42 42 2 2 6 18 18 18 218 218 218
38865 -253 253 253 253 253 253 253 253 253 253 253 253
38866 -253 253 253 253 253 253 253 253 253 253 253 253
38867 -253 253 253 253 253 253 253 253 253 253 253 253
38868 -253 253 253 253 253 253 253 253 253 253 253 253
38869 -253 253 253 253 253 253 253 253 253 253 253 253
38870 -253 253 253 253 253 253 250 250 250 221 221 221
38871 -218 218 218 101 101 101 2 2 6 14 14 14
38872 - 18 18 18 38 38 38 10 10 10 2 2 6
38873 - 2 2 6 2 2 6 2 2 6 78 78 78
38874 - 58 58 58 22 22 22 6 6 6 0 0 0
38875 - 0 0 0 0 0 0 0 0 0 0 0 0
38876 - 0 0 0 0 0 0 0 0 0 0 0 0
38877 - 0 0 0 0 0 0 0 0 0 0 0 0
38878 - 0 0 0 0 0 0 0 0 0 0 0 0
38879 - 0 0 0 0 0 0 0 0 0 0 0 0
38880 - 0 0 0 0 0 0 0 0 0 0 0 0
38881 - 0 0 0 0 0 0 0 0 0 0 0 0
38882 - 0 0 0 0 0 0 6 6 6 18 18 18
38883 - 54 54 54 82 82 82 2 2 6 26 26 26
38884 - 22 22 22 2 2 6 123 123 123 253 253 253
38885 -253 253 253 253 253 253 253 253 253 253 253 253
38886 -253 253 253 253 253 253 253 253 253 253 253 253
38887 -253 253 253 253 253 253 253 253 253 253 253 253
38888 -253 253 253 253 253 253 253 253 253 253 253 253
38889 -253 253 253 253 253 253 253 253 253 253 253 253
38890 -253 253 253 253 253 253 253 253 253 250 250 250
38891 -238 238 238 198 198 198 6 6 6 38 38 38
38892 - 58 58 58 26 26 26 38 38 38 2 2 6
38893 - 2 2 6 2 2 6 2 2 6 46 46 46
38894 - 78 78 78 30 30 30 10 10 10 0 0 0
38895 - 0 0 0 0 0 0 0 0 0 0 0 0
38896 - 0 0 0 0 0 0 0 0 0 0 0 0
38897 - 0 0 0 0 0 0 0 0 0 0 0 0
38898 - 0 0 0 0 0 0 0 0 0 0 0 0
38899 - 0 0 0 0 0 0 0 0 0 0 0 0
38900 - 0 0 0 0 0 0 0 0 0 0 0 0
38901 - 0 0 0 0 0 0 0 0 0 0 0 0
38902 - 0 0 0 0 0 0 10 10 10 30 30 30
38903 - 74 74 74 58 58 58 2 2 6 42 42 42
38904 - 2 2 6 22 22 22 231 231 231 253 253 253
38905 -253 253 253 253 253 253 253 253 253 253 253 253
38906 -253 253 253 253 253 253 253 253 253 250 250 250
38907 -253 253 253 253 253 253 253 253 253 253 253 253
38908 -253 253 253 253 253 253 253 253 253 253 253 253
38909 -253 253 253 253 253 253 253 253 253 253 253 253
38910 -253 253 253 253 253 253 253 253 253 253 253 253
38911 -253 253 253 246 246 246 46 46 46 38 38 38
38912 - 42 42 42 14 14 14 38 38 38 14 14 14
38913 - 2 2 6 2 2 6 2 2 6 6 6 6
38914 - 86 86 86 46 46 46 14 14 14 0 0 0
38915 - 0 0 0 0 0 0 0 0 0 0 0 0
38916 - 0 0 0 0 0 0 0 0 0 0 0 0
38917 - 0 0 0 0 0 0 0 0 0 0 0 0
38918 - 0 0 0 0 0 0 0 0 0 0 0 0
38919 - 0 0 0 0 0 0 0 0 0 0 0 0
38920 - 0 0 0 0 0 0 0 0 0 0 0 0
38921 - 0 0 0 0 0 0 0 0 0 0 0 0
38922 - 0 0 0 6 6 6 14 14 14 42 42 42
38923 - 90 90 90 18 18 18 18 18 18 26 26 26
38924 - 2 2 6 116 116 116 253 253 253 253 253 253
38925 -253 253 253 253 253 253 253 253 253 253 253 253
38926 -253 253 253 253 253 253 250 250 250 238 238 238
38927 -253 253 253 253 253 253 253 253 253 253 253 253
38928 -253 253 253 253 253 253 253 253 253 253 253 253
38929 -253 253 253 253 253 253 253 253 253 253 253 253
38930 -253 253 253 253 253 253 253 253 253 253 253 253
38931 -253 253 253 253 253 253 94 94 94 6 6 6
38932 - 2 2 6 2 2 6 10 10 10 34 34 34
38933 - 2 2 6 2 2 6 2 2 6 2 2 6
38934 - 74 74 74 58 58 58 22 22 22 6 6 6
38935 - 0 0 0 0 0 0 0 0 0 0 0 0
38936 - 0 0 0 0 0 0 0 0 0 0 0 0
38937 - 0 0 0 0 0 0 0 0 0 0 0 0
38938 - 0 0 0 0 0 0 0 0 0 0 0 0
38939 - 0 0 0 0 0 0 0 0 0 0 0 0
38940 - 0 0 0 0 0 0 0 0 0 0 0 0
38941 - 0 0 0 0 0 0 0 0 0 0 0 0
38942 - 0 0 0 10 10 10 26 26 26 66 66 66
38943 - 82 82 82 2 2 6 38 38 38 6 6 6
38944 - 14 14 14 210 210 210 253 253 253 253 253 253
38945 -253 253 253 253 253 253 253 253 253 253 253 253
38946 -253 253 253 253 253 253 246 246 246 242 242 242
38947 -253 253 253 253 253 253 253 253 253 253 253 253
38948 -253 253 253 253 253 253 253 253 253 253 253 253
38949 -253 253 253 253 253 253 253 253 253 253 253 253
38950 -253 253 253 253 253 253 253 253 253 253 253 253
38951 -253 253 253 253 253 253 144 144 144 2 2 6
38952 - 2 2 6 2 2 6 2 2 6 46 46 46
38953 - 2 2 6 2 2 6 2 2 6 2 2 6
38954 - 42 42 42 74 74 74 30 30 30 10 10 10
38955 - 0 0 0 0 0 0 0 0 0 0 0 0
38956 - 0 0 0 0 0 0 0 0 0 0 0 0
38957 - 0 0 0 0 0 0 0 0 0 0 0 0
38958 - 0 0 0 0 0 0 0 0 0 0 0 0
38959 - 0 0 0 0 0 0 0 0 0 0 0 0
38960 - 0 0 0 0 0 0 0 0 0 0 0 0
38961 - 0 0 0 0 0 0 0 0 0 0 0 0
38962 - 6 6 6 14 14 14 42 42 42 90 90 90
38963 - 26 26 26 6 6 6 42 42 42 2 2 6
38964 - 74 74 74 250 250 250 253 253 253 253 253 253
38965 -253 253 253 253 253 253 253 253 253 253 253 253
38966 -253 253 253 253 253 253 242 242 242 242 242 242
38967 -253 253 253 253 253 253 253 253 253 253 253 253
38968 -253 253 253 253 253 253 253 253 253 253 253 253
38969 -253 253 253 253 253 253 253 253 253 253 253 253
38970 -253 253 253 253 253 253 253 253 253 253 253 253
38971 -253 253 253 253 253 253 182 182 182 2 2 6
38972 - 2 2 6 2 2 6 2 2 6 46 46 46
38973 - 2 2 6 2 2 6 2 2 6 2 2 6
38974 - 10 10 10 86 86 86 38 38 38 10 10 10
38975 - 0 0 0 0 0 0 0 0 0 0 0 0
38976 - 0 0 0 0 0 0 0 0 0 0 0 0
38977 - 0 0 0 0 0 0 0 0 0 0 0 0
38978 - 0 0 0 0 0 0 0 0 0 0 0 0
38979 - 0 0 0 0 0 0 0 0 0 0 0 0
38980 - 0 0 0 0 0 0 0 0 0 0 0 0
38981 - 0 0 0 0 0 0 0 0 0 0 0 0
38982 - 10 10 10 26 26 26 66 66 66 82 82 82
38983 - 2 2 6 22 22 22 18 18 18 2 2 6
38984 -149 149 149 253 253 253 253 253 253 253 253 253
38985 -253 253 253 253 253 253 253 253 253 253 253 253
38986 -253 253 253 253 253 253 234 234 234 242 242 242
38987 -253 253 253 253 253 253 253 253 253 253 253 253
38988 -253 253 253 253 253 253 253 253 253 253 253 253
38989 -253 253 253 253 253 253 253 253 253 253 253 253
38990 -253 253 253 253 253 253 253 253 253 253 253 253
38991 -253 253 253 253 253 253 206 206 206 2 2 6
38992 - 2 2 6 2 2 6 2 2 6 38 38 38
38993 - 2 2 6 2 2 6 2 2 6 2 2 6
38994 - 6 6 6 86 86 86 46 46 46 14 14 14
38995 - 0 0 0 0 0 0 0 0 0 0 0 0
38996 - 0 0 0 0 0 0 0 0 0 0 0 0
38997 - 0 0 0 0 0 0 0 0 0 0 0 0
38998 - 0 0 0 0 0 0 0 0 0 0 0 0
38999 - 0 0 0 0 0 0 0 0 0 0 0 0
39000 - 0 0 0 0 0 0 0 0 0 0 0 0
39001 - 0 0 0 0 0 0 0 0 0 6 6 6
39002 - 18 18 18 46 46 46 86 86 86 18 18 18
39003 - 2 2 6 34 34 34 10 10 10 6 6 6
39004 -210 210 210 253 253 253 253 253 253 253 253 253
39005 -253 253 253 253 253 253 253 253 253 253 253 253
39006 -253 253 253 253 253 253 234 234 234 242 242 242
39007 -253 253 253 253 253 253 253 253 253 253 253 253
39008 -253 253 253 253 253 253 253 253 253 253 253 253
39009 -253 253 253 253 253 253 253 253 253 253 253 253
39010 -253 253 253 253 253 253 253 253 253 253 253 253
39011 -253 253 253 253 253 253 221 221 221 6 6 6
39012 - 2 2 6 2 2 6 6 6 6 30 30 30
39013 - 2 2 6 2 2 6 2 2 6 2 2 6
39014 - 2 2 6 82 82 82 54 54 54 18 18 18
39015 - 6 6 6 0 0 0 0 0 0 0 0 0
39016 - 0 0 0 0 0 0 0 0 0 0 0 0
39017 - 0 0 0 0 0 0 0 0 0 0 0 0
39018 - 0 0 0 0 0 0 0 0 0 0 0 0
39019 - 0 0 0 0 0 0 0 0 0 0 0 0
39020 - 0 0 0 0 0 0 0 0 0 0 0 0
39021 - 0 0 0 0 0 0 0 0 0 10 10 10
39022 - 26 26 26 66 66 66 62 62 62 2 2 6
39023 - 2 2 6 38 38 38 10 10 10 26 26 26
39024 -238 238 238 253 253 253 253 253 253 253 253 253
39025 -253 253 253 253 253 253 253 253 253 253 253 253
39026 -253 253 253 253 253 253 231 231 231 238 238 238
39027 -253 253 253 253 253 253 253 253 253 253 253 253
39028 -253 253 253 253 253 253 253 253 253 253 253 253
39029 -253 253 253 253 253 253 253 253 253 253 253 253
39030 -253 253 253 253 253 253 253 253 253 253 253 253
39031 -253 253 253 253 253 253 231 231 231 6 6 6
39032 - 2 2 6 2 2 6 10 10 10 30 30 30
39033 - 2 2 6 2 2 6 2 2 6 2 2 6
39034 - 2 2 6 66 66 66 58 58 58 22 22 22
39035 - 6 6 6 0 0 0 0 0 0 0 0 0
39036 - 0 0 0 0 0 0 0 0 0 0 0 0
39037 - 0 0 0 0 0 0 0 0 0 0 0 0
39038 - 0 0 0 0 0 0 0 0 0 0 0 0
39039 - 0 0 0 0 0 0 0 0 0 0 0 0
39040 - 0 0 0 0 0 0 0 0 0 0 0 0
39041 - 0 0 0 0 0 0 0 0 0 10 10 10
39042 - 38 38 38 78 78 78 6 6 6 2 2 6
39043 - 2 2 6 46 46 46 14 14 14 42 42 42
39044 -246 246 246 253 253 253 253 253 253 253 253 253
39045 -253 253 253 253 253 253 253 253 253 253 253 253
39046 -253 253 253 253 253 253 231 231 231 242 242 242
39047 -253 253 253 253 253 253 253 253 253 253 253 253
39048 -253 253 253 253 253 253 253 253 253 253 253 253
39049 -253 253 253 253 253 253 253 253 253 253 253 253
39050 -253 253 253 253 253 253 253 253 253 253 253 253
39051 -253 253 253 253 253 253 234 234 234 10 10 10
39052 - 2 2 6 2 2 6 22 22 22 14 14 14
39053 - 2 2 6 2 2 6 2 2 6 2 2 6
39054 - 2 2 6 66 66 66 62 62 62 22 22 22
39055 - 6 6 6 0 0 0 0 0 0 0 0 0
39056 - 0 0 0 0 0 0 0 0 0 0 0 0
39057 - 0 0 0 0 0 0 0 0 0 0 0 0
39058 - 0 0 0 0 0 0 0 0 0 0 0 0
39059 - 0 0 0 0 0 0 0 0 0 0 0 0
39060 - 0 0 0 0 0 0 0 0 0 0 0 0
39061 - 0 0 0 0 0 0 6 6 6 18 18 18
39062 - 50 50 50 74 74 74 2 2 6 2 2 6
39063 - 14 14 14 70 70 70 34 34 34 62 62 62
39064 -250 250 250 253 253 253 253 253 253 253 253 253
39065 -253 253 253 253 253 253 253 253 253 253 253 253
39066 -253 253 253 253 253 253 231 231 231 246 246 246
39067 -253 253 253 253 253 253 253 253 253 253 253 253
39068 -253 253 253 253 253 253 253 253 253 253 253 253
39069 -253 253 253 253 253 253 253 253 253 253 253 253
39070 -253 253 253 253 253 253 253 253 253 253 253 253
39071 -253 253 253 253 253 253 234 234 234 14 14 14
39072 - 2 2 6 2 2 6 30 30 30 2 2 6
39073 - 2 2 6 2 2 6 2 2 6 2 2 6
39074 - 2 2 6 66 66 66 62 62 62 22 22 22
39075 - 6 6 6 0 0 0 0 0 0 0 0 0
39076 - 0 0 0 0 0 0 0 0 0 0 0 0
39077 - 0 0 0 0 0 0 0 0 0 0 0 0
39078 - 0 0 0 0 0 0 0 0 0 0 0 0
39079 - 0 0 0 0 0 0 0 0 0 0 0 0
39080 - 0 0 0 0 0 0 0 0 0 0 0 0
39081 - 0 0 0 0 0 0 6 6 6 18 18 18
39082 - 54 54 54 62 62 62 2 2 6 2 2 6
39083 - 2 2 6 30 30 30 46 46 46 70 70 70
39084 -250 250 250 253 253 253 253 253 253 253 253 253
39085 -253 253 253 253 253 253 253 253 253 253 253 253
39086 -253 253 253 253 253 253 231 231 231 246 246 246
39087 -253 253 253 253 253 253 253 253 253 253 253 253
39088 -253 253 253 253 253 253 253 253 253 253 253 253
39089 -253 253 253 253 253 253 253 253 253 253 253 253
39090 -253 253 253 253 253 253 253 253 253 253 253 253
39091 -253 253 253 253 253 253 226 226 226 10 10 10
39092 - 2 2 6 6 6 6 30 30 30 2 2 6
39093 - 2 2 6 2 2 6 2 2 6 2 2 6
39094 - 2 2 6 66 66 66 58 58 58 22 22 22
39095 - 6 6 6 0 0 0 0 0 0 0 0 0
39096 - 0 0 0 0 0 0 0 0 0 0 0 0
39097 - 0 0 0 0 0 0 0 0 0 0 0 0
39098 - 0 0 0 0 0 0 0 0 0 0 0 0
39099 - 0 0 0 0 0 0 0 0 0 0 0 0
39100 - 0 0 0 0 0 0 0 0 0 0 0 0
39101 - 0 0 0 0 0 0 6 6 6 22 22 22
39102 - 58 58 58 62 62 62 2 2 6 2 2 6
39103 - 2 2 6 2 2 6 30 30 30 78 78 78
39104 -250 250 250 253 253 253 253 253 253 253 253 253
39105 -253 253 253 253 253 253 253 253 253 253 253 253
39106 -253 253 253 253 253 253 231 231 231 246 246 246
39107 -253 253 253 253 253 253 253 253 253 253 253 253
39108 -253 253 253 253 253 253 253 253 253 253 253 253
39109 -253 253 253 253 253 253 253 253 253 253 253 253
39110 -253 253 253 253 253 253 253 253 253 253 253 253
39111 -253 253 253 253 253 253 206 206 206 2 2 6
39112 - 22 22 22 34 34 34 18 14 6 22 22 22
39113 - 26 26 26 18 18 18 6 6 6 2 2 6
39114 - 2 2 6 82 82 82 54 54 54 18 18 18
39115 - 6 6 6 0 0 0 0 0 0 0 0 0
39116 - 0 0 0 0 0 0 0 0 0 0 0 0
39117 - 0 0 0 0 0 0 0 0 0 0 0 0
39118 - 0 0 0 0 0 0 0 0 0 0 0 0
39119 - 0 0 0 0 0 0 0 0 0 0 0 0
39120 - 0 0 0 0 0 0 0 0 0 0 0 0
39121 - 0 0 0 0 0 0 6 6 6 26 26 26
39122 - 62 62 62 106 106 106 74 54 14 185 133 11
39123 -210 162 10 121 92 8 6 6 6 62 62 62
39124 -238 238 238 253 253 253 253 253 253 253 253 253
39125 -253 253 253 253 253 253 253 253 253 253 253 253
39126 -253 253 253 253 253 253 231 231 231 246 246 246
39127 -253 253 253 253 253 253 253 253 253 253 253 253
39128 -253 253 253 253 253 253 253 253 253 253 253 253
39129 -253 253 253 253 253 253 253 253 253 253 253 253
39130 -253 253 253 253 253 253 253 253 253 253 253 253
39131 -253 253 253 253 253 253 158 158 158 18 18 18
39132 - 14 14 14 2 2 6 2 2 6 2 2 6
39133 - 6 6 6 18 18 18 66 66 66 38 38 38
39134 - 6 6 6 94 94 94 50 50 50 18 18 18
39135 - 6 6 6 0 0 0 0 0 0 0 0 0
39136 - 0 0 0 0 0 0 0 0 0 0 0 0
39137 - 0 0 0 0 0 0 0 0 0 0 0 0
39138 - 0 0 0 0 0 0 0 0 0 0 0 0
39139 - 0 0 0 0 0 0 0 0 0 0 0 0
39140 - 0 0 0 0 0 0 0 0 0 6 6 6
39141 - 10 10 10 10 10 10 18 18 18 38 38 38
39142 - 78 78 78 142 134 106 216 158 10 242 186 14
39143 -246 190 14 246 190 14 156 118 10 10 10 10
39144 - 90 90 90 238 238 238 253 253 253 253 253 253
39145 -253 253 253 253 253 253 253 253 253 253 253 253
39146 -253 253 253 253 253 253 231 231 231 250 250 250
39147 -253 253 253 253 253 253 253 253 253 253 253 253
39148 -253 253 253 253 253 253 253 253 253 253 253 253
39149 -253 253 253 253 253 253 253 253 253 253 253 253
39150 -253 253 253 253 253 253 253 253 253 246 230 190
39151 -238 204 91 238 204 91 181 142 44 37 26 9
39152 - 2 2 6 2 2 6 2 2 6 2 2 6
39153 - 2 2 6 2 2 6 38 38 38 46 46 46
39154 - 26 26 26 106 106 106 54 54 54 18 18 18
39155 - 6 6 6 0 0 0 0 0 0 0 0 0
39156 - 0 0 0 0 0 0 0 0 0 0 0 0
39157 - 0 0 0 0 0 0 0 0 0 0 0 0
39158 - 0 0 0 0 0 0 0 0 0 0 0 0
39159 - 0 0 0 0 0 0 0 0 0 0 0 0
39160 - 0 0 0 6 6 6 14 14 14 22 22 22
39161 - 30 30 30 38 38 38 50 50 50 70 70 70
39162 -106 106 106 190 142 34 226 170 11 242 186 14
39163 -246 190 14 246 190 14 246 190 14 154 114 10
39164 - 6 6 6 74 74 74 226 226 226 253 253 253
39165 -253 253 253 253 253 253 253 253 253 253 253 253
39166 -253 253 253 253 253 253 231 231 231 250 250 250
39167 -253 253 253 253 253 253 253 253 253 253 253 253
39168 -253 253 253 253 253 253 253 253 253 253 253 253
39169 -253 253 253 253 253 253 253 253 253 253 253 253
39170 -253 253 253 253 253 253 253 253 253 228 184 62
39171 -241 196 14 241 208 19 232 195 16 38 30 10
39172 - 2 2 6 2 2 6 2 2 6 2 2 6
39173 - 2 2 6 6 6 6 30 30 30 26 26 26
39174 -203 166 17 154 142 90 66 66 66 26 26 26
39175 - 6 6 6 0 0 0 0 0 0 0 0 0
39176 - 0 0 0 0 0 0 0 0 0 0 0 0
39177 - 0 0 0 0 0 0 0 0 0 0 0 0
39178 - 0 0 0 0 0 0 0 0 0 0 0 0
39179 - 0 0 0 0 0 0 0 0 0 0 0 0
39180 - 6 6 6 18 18 18 38 38 38 58 58 58
39181 - 78 78 78 86 86 86 101 101 101 123 123 123
39182 -175 146 61 210 150 10 234 174 13 246 186 14
39183 -246 190 14 246 190 14 246 190 14 238 190 10
39184 -102 78 10 2 2 6 46 46 46 198 198 198
39185 -253 253 253 253 253 253 253 253 253 253 253 253
39186 -253 253 253 253 253 253 234 234 234 242 242 242
39187 -253 253 253 253 253 253 253 253 253 253 253 253
39188 -253 253 253 253 253 253 253 253 253 253 253 253
39189 -253 253 253 253 253 253 253 253 253 253 253 253
39190 -253 253 253 253 253 253 253 253 253 224 178 62
39191 -242 186 14 241 196 14 210 166 10 22 18 6
39192 - 2 2 6 2 2 6 2 2 6 2 2 6
39193 - 2 2 6 2 2 6 6 6 6 121 92 8
39194 -238 202 15 232 195 16 82 82 82 34 34 34
39195 - 10 10 10 0 0 0 0 0 0 0 0 0
39196 - 0 0 0 0 0 0 0 0 0 0 0 0
39197 - 0 0 0 0 0 0 0 0 0 0 0 0
39198 - 0 0 0 0 0 0 0 0 0 0 0 0
39199 - 0 0 0 0 0 0 0 0 0 0 0 0
39200 - 14 14 14 38 38 38 70 70 70 154 122 46
39201 -190 142 34 200 144 11 197 138 11 197 138 11
39202 -213 154 11 226 170 11 242 186 14 246 190 14
39203 -246 190 14 246 190 14 246 190 14 246 190 14
39204 -225 175 15 46 32 6 2 2 6 22 22 22
39205 -158 158 158 250 250 250 253 253 253 253 253 253
39206 -253 253 253 253 253 253 253 253 253 253 253 253
39207 -253 253 253 253 253 253 253 253 253 253 253 253
39208 -253 253 253 253 253 253 253 253 253 253 253 253
39209 -253 253 253 253 253 253 253 253 253 253 253 253
39210 -253 253 253 250 250 250 242 242 242 224 178 62
39211 -239 182 13 236 186 11 213 154 11 46 32 6
39212 - 2 2 6 2 2 6 2 2 6 2 2 6
39213 - 2 2 6 2 2 6 61 42 6 225 175 15
39214 -238 190 10 236 186 11 112 100 78 42 42 42
39215 - 14 14 14 0 0 0 0 0 0 0 0 0
39216 - 0 0 0 0 0 0 0 0 0 0 0 0
39217 - 0 0 0 0 0 0 0 0 0 0 0 0
39218 - 0 0 0 0 0 0 0 0 0 0 0 0
39219 - 0 0 0 0 0 0 0 0 0 6 6 6
39220 - 22 22 22 54 54 54 154 122 46 213 154 11
39221 -226 170 11 230 174 11 226 170 11 226 170 11
39222 -236 178 12 242 186 14 246 190 14 246 190 14
39223 -246 190 14 246 190 14 246 190 14 246 190 14
39224 -241 196 14 184 144 12 10 10 10 2 2 6
39225 - 6 6 6 116 116 116 242 242 242 253 253 253
39226 -253 253 253 253 253 253 253 253 253 253 253 253
39227 -253 253 253 253 253 253 253 253 253 253 253 253
39228 -253 253 253 253 253 253 253 253 253 253 253 253
39229 -253 253 253 253 253 253 253 253 253 253 253 253
39230 -253 253 253 231 231 231 198 198 198 214 170 54
39231 -236 178 12 236 178 12 210 150 10 137 92 6
39232 - 18 14 6 2 2 6 2 2 6 2 2 6
39233 - 6 6 6 70 47 6 200 144 11 236 178 12
39234 -239 182 13 239 182 13 124 112 88 58 58 58
39235 - 22 22 22 6 6 6 0 0 0 0 0 0
39236 - 0 0 0 0 0 0 0 0 0 0 0 0
39237 - 0 0 0 0 0 0 0 0 0 0 0 0
39238 - 0 0 0 0 0 0 0 0 0 0 0 0
39239 - 0 0 0 0 0 0 0 0 0 10 10 10
39240 - 30 30 30 70 70 70 180 133 36 226 170 11
39241 -239 182 13 242 186 14 242 186 14 246 186 14
39242 -246 190 14 246 190 14 246 190 14 246 190 14
39243 -246 190 14 246 190 14 246 190 14 246 190 14
39244 -246 190 14 232 195 16 98 70 6 2 2 6
39245 - 2 2 6 2 2 6 66 66 66 221 221 221
39246 -253 253 253 253 253 253 253 253 253 253 253 253
39247 -253 253 253 253 253 253 253 253 253 253 253 253
39248 -253 253 253 253 253 253 253 253 253 253 253 253
39249 -253 253 253 253 253 253 253 253 253 253 253 253
39250 -253 253 253 206 206 206 198 198 198 214 166 58
39251 -230 174 11 230 174 11 216 158 10 192 133 9
39252 -163 110 8 116 81 8 102 78 10 116 81 8
39253 -167 114 7 197 138 11 226 170 11 239 182 13
39254 -242 186 14 242 186 14 162 146 94 78 78 78
39255 - 34 34 34 14 14 14 6 6 6 0 0 0
39256 - 0 0 0 0 0 0 0 0 0 0 0 0
39257 - 0 0 0 0 0 0 0 0 0 0 0 0
39258 - 0 0 0 0 0 0 0 0 0 0 0 0
39259 - 0 0 0 0 0 0 0 0 0 6 6 6
39260 - 30 30 30 78 78 78 190 142 34 226 170 11
39261 -239 182 13 246 190 14 246 190 14 246 190 14
39262 -246 190 14 246 190 14 246 190 14 246 190 14
39263 -246 190 14 246 190 14 246 190 14 246 190 14
39264 -246 190 14 241 196 14 203 166 17 22 18 6
39265 - 2 2 6 2 2 6 2 2 6 38 38 38
39266 -218 218 218 253 253 253 253 253 253 253 253 253
39267 -253 253 253 253 253 253 253 253 253 253 253 253
39268 -253 253 253 253 253 253 253 253 253 253 253 253
39269 -253 253 253 253 253 253 253 253 253 253 253 253
39270 -250 250 250 206 206 206 198 198 198 202 162 69
39271 -226 170 11 236 178 12 224 166 10 210 150 10
39272 -200 144 11 197 138 11 192 133 9 197 138 11
39273 -210 150 10 226 170 11 242 186 14 246 190 14
39274 -246 190 14 246 186 14 225 175 15 124 112 88
39275 - 62 62 62 30 30 30 14 14 14 6 6 6
39276 - 0 0 0 0 0 0 0 0 0 0 0 0
39277 - 0 0 0 0 0 0 0 0 0 0 0 0
39278 - 0 0 0 0 0 0 0 0 0 0 0 0
39279 - 0 0 0 0 0 0 0 0 0 10 10 10
39280 - 30 30 30 78 78 78 174 135 50 224 166 10
39281 -239 182 13 246 190 14 246 190 14 246 190 14
39282 -246 190 14 246 190 14 246 190 14 246 190 14
39283 -246 190 14 246 190 14 246 190 14 246 190 14
39284 -246 190 14 246 190 14 241 196 14 139 102 15
39285 - 2 2 6 2 2 6 2 2 6 2 2 6
39286 - 78 78 78 250 250 250 253 253 253 253 253 253
39287 -253 253 253 253 253 253 253 253 253 253 253 253
39288 -253 253 253 253 253 253 253 253 253 253 253 253
39289 -253 253 253 253 253 253 253 253 253 253 253 253
39290 -250 250 250 214 214 214 198 198 198 190 150 46
39291 -219 162 10 236 178 12 234 174 13 224 166 10
39292 -216 158 10 213 154 11 213 154 11 216 158 10
39293 -226 170 11 239 182 13 246 190 14 246 190 14
39294 -246 190 14 246 190 14 242 186 14 206 162 42
39295 -101 101 101 58 58 58 30 30 30 14 14 14
39296 - 6 6 6 0 0 0 0 0 0 0 0 0
39297 - 0 0 0 0 0 0 0 0 0 0 0 0
39298 - 0 0 0 0 0 0 0 0 0 0 0 0
39299 - 0 0 0 0 0 0 0 0 0 10 10 10
39300 - 30 30 30 74 74 74 174 135 50 216 158 10
39301 -236 178 12 246 190 14 246 190 14 246 190 14
39302 -246 190 14 246 190 14 246 190 14 246 190 14
39303 -246 190 14 246 190 14 246 190 14 246 190 14
39304 -246 190 14 246 190 14 241 196 14 226 184 13
39305 - 61 42 6 2 2 6 2 2 6 2 2 6
39306 - 22 22 22 238 238 238 253 253 253 253 253 253
39307 -253 253 253 253 253 253 253 253 253 253 253 253
39308 -253 253 253 253 253 253 253 253 253 253 253 253
39309 -253 253 253 253 253 253 253 253 253 253 253 253
39310 -253 253 253 226 226 226 187 187 187 180 133 36
39311 -216 158 10 236 178 12 239 182 13 236 178 12
39312 -230 174 11 226 170 11 226 170 11 230 174 11
39313 -236 178 12 242 186 14 246 190 14 246 190 14
39314 -246 190 14 246 190 14 246 186 14 239 182 13
39315 -206 162 42 106 106 106 66 66 66 34 34 34
39316 - 14 14 14 6 6 6 0 0 0 0 0 0
39317 - 0 0 0 0 0 0 0 0 0 0 0 0
39318 - 0 0 0 0 0 0 0 0 0 0 0 0
39319 - 0 0 0 0 0 0 0 0 0 6 6 6
39320 - 26 26 26 70 70 70 163 133 67 213 154 11
39321 -236 178 12 246 190 14 246 190 14 246 190 14
39322 -246 190 14 246 190 14 246 190 14 246 190 14
39323 -246 190 14 246 190 14 246 190 14 246 190 14
39324 -246 190 14 246 190 14 246 190 14 241 196 14
39325 -190 146 13 18 14 6 2 2 6 2 2 6
39326 - 46 46 46 246 246 246 253 253 253 253 253 253
39327 -253 253 253 253 253 253 253 253 253 253 253 253
39328 -253 253 253 253 253 253 253 253 253 253 253 253
39329 -253 253 253 253 253 253 253 253 253 253 253 253
39330 -253 253 253 221 221 221 86 86 86 156 107 11
39331 -216 158 10 236 178 12 242 186 14 246 186 14
39332 -242 186 14 239 182 13 239 182 13 242 186 14
39333 -242 186 14 246 186 14 246 190 14 246 190 14
39334 -246 190 14 246 190 14 246 190 14 246 190 14
39335 -242 186 14 225 175 15 142 122 72 66 66 66
39336 - 30 30 30 10 10 10 0 0 0 0 0 0
39337 - 0 0 0 0 0 0 0 0 0 0 0 0
39338 - 0 0 0 0 0 0 0 0 0 0 0 0
39339 - 0 0 0 0 0 0 0 0 0 6 6 6
39340 - 26 26 26 70 70 70 163 133 67 210 150 10
39341 -236 178 12 246 190 14 246 190 14 246 190 14
39342 -246 190 14 246 190 14 246 190 14 246 190 14
39343 -246 190 14 246 190 14 246 190 14 246 190 14
39344 -246 190 14 246 190 14 246 190 14 246 190 14
39345 -232 195 16 121 92 8 34 34 34 106 106 106
39346 -221 221 221 253 253 253 253 253 253 253 253 253
39347 -253 253 253 253 253 253 253 253 253 253 253 253
39348 -253 253 253 253 253 253 253 253 253 253 253 253
39349 -253 253 253 253 253 253 253 253 253 253 253 253
39350 -242 242 242 82 82 82 18 14 6 163 110 8
39351 -216 158 10 236 178 12 242 186 14 246 190 14
39352 -246 190 14 246 190 14 246 190 14 246 190 14
39353 -246 190 14 246 190 14 246 190 14 246 190 14
39354 -246 190 14 246 190 14 246 190 14 246 190 14
39355 -246 190 14 246 190 14 242 186 14 163 133 67
39356 - 46 46 46 18 18 18 6 6 6 0 0 0
39357 - 0 0 0 0 0 0 0 0 0 0 0 0
39358 - 0 0 0 0 0 0 0 0 0 0 0 0
39359 - 0 0 0 0 0 0 0 0 0 10 10 10
39360 - 30 30 30 78 78 78 163 133 67 210 150 10
39361 -236 178 12 246 186 14 246 190 14 246 190 14
39362 -246 190 14 246 190 14 246 190 14 246 190 14
39363 -246 190 14 246 190 14 246 190 14 246 190 14
39364 -246 190 14 246 190 14 246 190 14 246 190 14
39365 -241 196 14 215 174 15 190 178 144 253 253 253
39366 -253 253 253 253 253 253 253 253 253 253 253 253
39367 -253 253 253 253 253 253 253 253 253 253 253 253
39368 -253 253 253 253 253 253 253 253 253 253 253 253
39369 -253 253 253 253 253 253 253 253 253 218 218 218
39370 - 58 58 58 2 2 6 22 18 6 167 114 7
39371 -216 158 10 236 178 12 246 186 14 246 190 14
39372 -246 190 14 246 190 14 246 190 14 246 190 14
39373 -246 190 14 246 190 14 246 190 14 246 190 14
39374 -246 190 14 246 190 14 246 190 14 246 190 14
39375 -246 190 14 246 186 14 242 186 14 190 150 46
39376 - 54 54 54 22 22 22 6 6 6 0 0 0
39377 - 0 0 0 0 0 0 0 0 0 0 0 0
39378 - 0 0 0 0 0 0 0 0 0 0 0 0
39379 - 0 0 0 0 0 0 0 0 0 14 14 14
39380 - 38 38 38 86 86 86 180 133 36 213 154 11
39381 -236 178 12 246 186 14 246 190 14 246 190 14
39382 -246 190 14 246 190 14 246 190 14 246 190 14
39383 -246 190 14 246 190 14 246 190 14 246 190 14
39384 -246 190 14 246 190 14 246 190 14 246 190 14
39385 -246 190 14 232 195 16 190 146 13 214 214 214
39386 -253 253 253 253 253 253 253 253 253 253 253 253
39387 -253 253 253 253 253 253 253 253 253 253 253 253
39388 -253 253 253 253 253 253 253 253 253 253 253 253
39389 -253 253 253 250 250 250 170 170 170 26 26 26
39390 - 2 2 6 2 2 6 37 26 9 163 110 8
39391 -219 162 10 239 182 13 246 186 14 246 190 14
39392 -246 190 14 246 190 14 246 190 14 246 190 14
39393 -246 190 14 246 190 14 246 190 14 246 190 14
39394 -246 190 14 246 190 14 246 190 14 246 190 14
39395 -246 186 14 236 178 12 224 166 10 142 122 72
39396 - 46 46 46 18 18 18 6 6 6 0 0 0
39397 - 0 0 0 0 0 0 0 0 0 0 0 0
39398 - 0 0 0 0 0 0 0 0 0 0 0 0
39399 - 0 0 0 0 0 0 6 6 6 18 18 18
39400 - 50 50 50 109 106 95 192 133 9 224 166 10
39401 -242 186 14 246 190 14 246 190 14 246 190 14
39402 -246 190 14 246 190 14 246 190 14 246 190 14
39403 -246 190 14 246 190 14 246 190 14 246 190 14
39404 -246 190 14 246 190 14 246 190 14 246 190 14
39405 -242 186 14 226 184 13 210 162 10 142 110 46
39406 -226 226 226 253 253 253 253 253 253 253 253 253
39407 -253 253 253 253 253 253 253 253 253 253 253 253
39408 -253 253 253 253 253 253 253 253 253 253 253 253
39409 -198 198 198 66 66 66 2 2 6 2 2 6
39410 - 2 2 6 2 2 6 50 34 6 156 107 11
39411 -219 162 10 239 182 13 246 186 14 246 190 14
39412 -246 190 14 246 190 14 246 190 14 246 190 14
39413 -246 190 14 246 190 14 246 190 14 246 190 14
39414 -246 190 14 246 190 14 246 190 14 242 186 14
39415 -234 174 13 213 154 11 154 122 46 66 66 66
39416 - 30 30 30 10 10 10 0 0 0 0 0 0
39417 - 0 0 0 0 0 0 0 0 0 0 0 0
39418 - 0 0 0 0 0 0 0 0 0 0 0 0
39419 - 0 0 0 0 0 0 6 6 6 22 22 22
39420 - 58 58 58 154 121 60 206 145 10 234 174 13
39421 -242 186 14 246 186 14 246 190 14 246 190 14
39422 -246 190 14 246 190 14 246 190 14 246 190 14
39423 -246 190 14 246 190 14 246 190 14 246 190 14
39424 -246 190 14 246 190 14 246 190 14 246 190 14
39425 -246 186 14 236 178 12 210 162 10 163 110 8
39426 - 61 42 6 138 138 138 218 218 218 250 250 250
39427 -253 253 253 253 253 253 253 253 253 250 250 250
39428 -242 242 242 210 210 210 144 144 144 66 66 66
39429 - 6 6 6 2 2 6 2 2 6 2 2 6
39430 - 2 2 6 2 2 6 61 42 6 163 110 8
39431 -216 158 10 236 178 12 246 190 14 246 190 14
39432 -246 190 14 246 190 14 246 190 14 246 190 14
39433 -246 190 14 246 190 14 246 190 14 246 190 14
39434 -246 190 14 239 182 13 230 174 11 216 158 10
39435 -190 142 34 124 112 88 70 70 70 38 38 38
39436 - 18 18 18 6 6 6 0 0 0 0 0 0
39437 - 0 0 0 0 0 0 0 0 0 0 0 0
39438 - 0 0 0 0 0 0 0 0 0 0 0 0
39439 - 0 0 0 0 0 0 6 6 6 22 22 22
39440 - 62 62 62 168 124 44 206 145 10 224 166 10
39441 -236 178 12 239 182 13 242 186 14 242 186 14
39442 -246 186 14 246 190 14 246 190 14 246 190 14
39443 -246 190 14 246 190 14 246 190 14 246 190 14
39444 -246 190 14 246 190 14 246 190 14 246 190 14
39445 -246 190 14 236 178 12 216 158 10 175 118 6
39446 - 80 54 7 2 2 6 6 6 6 30 30 30
39447 - 54 54 54 62 62 62 50 50 50 38 38 38
39448 - 14 14 14 2 2 6 2 2 6 2 2 6
39449 - 2 2 6 2 2 6 2 2 6 2 2 6
39450 - 2 2 6 6 6 6 80 54 7 167 114 7
39451 -213 154 11 236 178 12 246 190 14 246 190 14
39452 -246 190 14 246 190 14 246 190 14 246 190 14
39453 -246 190 14 242 186 14 239 182 13 239 182 13
39454 -230 174 11 210 150 10 174 135 50 124 112 88
39455 - 82 82 82 54 54 54 34 34 34 18 18 18
39456 - 6 6 6 0 0 0 0 0 0 0 0 0
39457 - 0 0 0 0 0 0 0 0 0 0 0 0
39458 - 0 0 0 0 0 0 0 0 0 0 0 0
39459 - 0 0 0 0 0 0 6 6 6 18 18 18
39460 - 50 50 50 158 118 36 192 133 9 200 144 11
39461 -216 158 10 219 162 10 224 166 10 226 170 11
39462 -230 174 11 236 178 12 239 182 13 239 182 13
39463 -242 186 14 246 186 14 246 190 14 246 190 14
39464 -246 190 14 246 190 14 246 190 14 246 190 14
39465 -246 186 14 230 174 11 210 150 10 163 110 8
39466 -104 69 6 10 10 10 2 2 6 2 2 6
39467 - 2 2 6 2 2 6 2 2 6 2 2 6
39468 - 2 2 6 2 2 6 2 2 6 2 2 6
39469 - 2 2 6 2 2 6 2 2 6 2 2 6
39470 - 2 2 6 6 6 6 91 60 6 167 114 7
39471 -206 145 10 230 174 11 242 186 14 246 190 14
39472 -246 190 14 246 190 14 246 186 14 242 186 14
39473 -239 182 13 230 174 11 224 166 10 213 154 11
39474 -180 133 36 124 112 88 86 86 86 58 58 58
39475 - 38 38 38 22 22 22 10 10 10 6 6 6
39476 - 0 0 0 0 0 0 0 0 0 0 0 0
39477 - 0 0 0 0 0 0 0 0 0 0 0 0
39478 - 0 0 0 0 0 0 0 0 0 0 0 0
39479 - 0 0 0 0 0 0 0 0 0 14 14 14
39480 - 34 34 34 70 70 70 138 110 50 158 118 36
39481 -167 114 7 180 123 7 192 133 9 197 138 11
39482 -200 144 11 206 145 10 213 154 11 219 162 10
39483 -224 166 10 230 174 11 239 182 13 242 186 14
39484 -246 186 14 246 186 14 246 186 14 246 186 14
39485 -239 182 13 216 158 10 185 133 11 152 99 6
39486 -104 69 6 18 14 6 2 2 6 2 2 6
39487 - 2 2 6 2 2 6 2 2 6 2 2 6
39488 - 2 2 6 2 2 6 2 2 6 2 2 6
39489 - 2 2 6 2 2 6 2 2 6 2 2 6
39490 - 2 2 6 6 6 6 80 54 7 152 99 6
39491 -192 133 9 219 162 10 236 178 12 239 182 13
39492 -246 186 14 242 186 14 239 182 13 236 178 12
39493 -224 166 10 206 145 10 192 133 9 154 121 60
39494 - 94 94 94 62 62 62 42 42 42 22 22 22
39495 - 14 14 14 6 6 6 0 0 0 0 0 0
39496 - 0 0 0 0 0 0 0 0 0 0 0 0
39497 - 0 0 0 0 0 0 0 0 0 0 0 0
39498 - 0 0 0 0 0 0 0 0 0 0 0 0
39499 - 0 0 0 0 0 0 0 0 0 6 6 6
39500 - 18 18 18 34 34 34 58 58 58 78 78 78
39501 -101 98 89 124 112 88 142 110 46 156 107 11
39502 -163 110 8 167 114 7 175 118 6 180 123 7
39503 -185 133 11 197 138 11 210 150 10 219 162 10
39504 -226 170 11 236 178 12 236 178 12 234 174 13
39505 -219 162 10 197 138 11 163 110 8 130 83 6
39506 - 91 60 6 10 10 10 2 2 6 2 2 6
39507 - 18 18 18 38 38 38 38 38 38 38 38 38
39508 - 38 38 38 38 38 38 38 38 38 38 38 38
39509 - 38 38 38 38 38 38 26 26 26 2 2 6
39510 - 2 2 6 6 6 6 70 47 6 137 92 6
39511 -175 118 6 200 144 11 219 162 10 230 174 11
39512 -234 174 13 230 174 11 219 162 10 210 150 10
39513 -192 133 9 163 110 8 124 112 88 82 82 82
39514 - 50 50 50 30 30 30 14 14 14 6 6 6
39515 - 0 0 0 0 0 0 0 0 0 0 0 0
39516 - 0 0 0 0 0 0 0 0 0 0 0 0
39517 - 0 0 0 0 0 0 0 0 0 0 0 0
39518 - 0 0 0 0 0 0 0 0 0 0 0 0
39519 - 0 0 0 0 0 0 0 0 0 0 0 0
39520 - 6 6 6 14 14 14 22 22 22 34 34 34
39521 - 42 42 42 58 58 58 74 74 74 86 86 86
39522 -101 98 89 122 102 70 130 98 46 121 87 25
39523 -137 92 6 152 99 6 163 110 8 180 123 7
39524 -185 133 11 197 138 11 206 145 10 200 144 11
39525 -180 123 7 156 107 11 130 83 6 104 69 6
39526 - 50 34 6 54 54 54 110 110 110 101 98 89
39527 - 86 86 86 82 82 82 78 78 78 78 78 78
39528 - 78 78 78 78 78 78 78 78 78 78 78 78
39529 - 78 78 78 82 82 82 86 86 86 94 94 94
39530 -106 106 106 101 101 101 86 66 34 124 80 6
39531 -156 107 11 180 123 7 192 133 9 200 144 11
39532 -206 145 10 200 144 11 192 133 9 175 118 6
39533 -139 102 15 109 106 95 70 70 70 42 42 42
39534 - 22 22 22 10 10 10 0 0 0 0 0 0
39535 - 0 0 0 0 0 0 0 0 0 0 0 0
39536 - 0 0 0 0 0 0 0 0 0 0 0 0
39537 - 0 0 0 0 0 0 0 0 0 0 0 0
39538 - 0 0 0 0 0 0 0 0 0 0 0 0
39539 - 0 0 0 0 0 0 0 0 0 0 0 0
39540 - 0 0 0 0 0 0 6 6 6 10 10 10
39541 - 14 14 14 22 22 22 30 30 30 38 38 38
39542 - 50 50 50 62 62 62 74 74 74 90 90 90
39543 -101 98 89 112 100 78 121 87 25 124 80 6
39544 -137 92 6 152 99 6 152 99 6 152 99 6
39545 -138 86 6 124 80 6 98 70 6 86 66 30
39546 -101 98 89 82 82 82 58 58 58 46 46 46
39547 - 38 38 38 34 34 34 34 34 34 34 34 34
39548 - 34 34 34 34 34 34 34 34 34 34 34 34
39549 - 34 34 34 34 34 34 38 38 38 42 42 42
39550 - 54 54 54 82 82 82 94 86 76 91 60 6
39551 -134 86 6 156 107 11 167 114 7 175 118 6
39552 -175 118 6 167 114 7 152 99 6 121 87 25
39553 -101 98 89 62 62 62 34 34 34 18 18 18
39554 - 6 6 6 0 0 0 0 0 0 0 0 0
39555 - 0 0 0 0 0 0 0 0 0 0 0 0
39556 - 0 0 0 0 0 0 0 0 0 0 0 0
39557 - 0 0 0 0 0 0 0 0 0 0 0 0
39558 - 0 0 0 0 0 0 0 0 0 0 0 0
39559 - 0 0 0 0 0 0 0 0 0 0 0 0
39560 - 0 0 0 0 0 0 0 0 0 0 0 0
39561 - 0 0 0 6 6 6 6 6 6 10 10 10
39562 - 18 18 18 22 22 22 30 30 30 42 42 42
39563 - 50 50 50 66 66 66 86 86 86 101 98 89
39564 -106 86 58 98 70 6 104 69 6 104 69 6
39565 -104 69 6 91 60 6 82 62 34 90 90 90
39566 - 62 62 62 38 38 38 22 22 22 14 14 14
39567 - 10 10 10 10 10 10 10 10 10 10 10 10
39568 - 10 10 10 10 10 10 6 6 6 10 10 10
39569 - 10 10 10 10 10 10 10 10 10 14 14 14
39570 - 22 22 22 42 42 42 70 70 70 89 81 66
39571 - 80 54 7 104 69 6 124 80 6 137 92 6
39572 -134 86 6 116 81 8 100 82 52 86 86 86
39573 - 58 58 58 30 30 30 14 14 14 6 6 6
39574 - 0 0 0 0 0 0 0 0 0 0 0 0
39575 - 0 0 0 0 0 0 0 0 0 0 0 0
39576 - 0 0 0 0 0 0 0 0 0 0 0 0
39577 - 0 0 0 0 0 0 0 0 0 0 0 0
39578 - 0 0 0 0 0 0 0 0 0 0 0 0
39579 - 0 0 0 0 0 0 0 0 0 0 0 0
39580 - 0 0 0 0 0 0 0 0 0 0 0 0
39581 - 0 0 0 0 0 0 0 0 0 0 0 0
39582 - 0 0 0 6 6 6 10 10 10 14 14 14
39583 - 18 18 18 26 26 26 38 38 38 54 54 54
39584 - 70 70 70 86 86 86 94 86 76 89 81 66
39585 - 89 81 66 86 86 86 74 74 74 50 50 50
39586 - 30 30 30 14 14 14 6 6 6 0 0 0
39587 - 0 0 0 0 0 0 0 0 0 0 0 0
39588 - 0 0 0 0 0 0 0 0 0 0 0 0
39589 - 0 0 0 0 0 0 0 0 0 0 0 0
39590 - 6 6 6 18 18 18 34 34 34 58 58 58
39591 - 82 82 82 89 81 66 89 81 66 89 81 66
39592 - 94 86 66 94 86 76 74 74 74 50 50 50
39593 - 26 26 26 14 14 14 6 6 6 0 0 0
39594 - 0 0 0 0 0 0 0 0 0 0 0 0
39595 - 0 0 0 0 0 0 0 0 0 0 0 0
39596 - 0 0 0 0 0 0 0 0 0 0 0 0
39597 - 0 0 0 0 0 0 0 0 0 0 0 0
39598 - 0 0 0 0 0 0 0 0 0 0 0 0
39599 - 0 0 0 0 0 0 0 0 0 0 0 0
39600 - 0 0 0 0 0 0 0 0 0 0 0 0
39601 - 0 0 0 0 0 0 0 0 0 0 0 0
39602 - 0 0 0 0 0 0 0 0 0 0 0 0
39603 - 6 6 6 6 6 6 14 14 14 18 18 18
39604 - 30 30 30 38 38 38 46 46 46 54 54 54
39605 - 50 50 50 42 42 42 30 30 30 18 18 18
39606 - 10 10 10 0 0 0 0 0 0 0 0 0
39607 - 0 0 0 0 0 0 0 0 0 0 0 0
39608 - 0 0 0 0 0 0 0 0 0 0 0 0
39609 - 0 0 0 0 0 0 0 0 0 0 0 0
39610 - 0 0 0 6 6 6 14 14 14 26 26 26
39611 - 38 38 38 50 50 50 58 58 58 58 58 58
39612 - 54 54 54 42 42 42 30 30 30 18 18 18
39613 - 10 10 10 0 0 0 0 0 0 0 0 0
39614 - 0 0 0 0 0 0 0 0 0 0 0 0
39615 - 0 0 0 0 0 0 0 0 0 0 0 0
39616 - 0 0 0 0 0 0 0 0 0 0 0 0
39617 - 0 0 0 0 0 0 0 0 0 0 0 0
39618 - 0 0 0 0 0 0 0 0 0 0 0 0
39619 - 0 0 0 0 0 0 0 0 0 0 0 0
39620 - 0 0 0 0 0 0 0 0 0 0 0 0
39621 - 0 0 0 0 0 0 0 0 0 0 0 0
39622 - 0 0 0 0 0 0 0 0 0 0 0 0
39623 - 0 0 0 0 0 0 0 0 0 6 6 6
39624 - 6 6 6 10 10 10 14 14 14 18 18 18
39625 - 18 18 18 14 14 14 10 10 10 6 6 6
39626 - 0 0 0 0 0 0 0 0 0 0 0 0
39627 - 0 0 0 0 0 0 0 0 0 0 0 0
39628 - 0 0 0 0 0 0 0 0 0 0 0 0
39629 - 0 0 0 0 0 0 0 0 0 0 0 0
39630 - 0 0 0 0 0 0 0 0 0 6 6 6
39631 - 14 14 14 18 18 18 22 22 22 22 22 22
39632 - 18 18 18 14 14 14 10 10 10 6 6 6
39633 - 0 0 0 0 0 0 0 0 0 0 0 0
39634 - 0 0 0 0 0 0 0 0 0 0 0 0
39635 - 0 0 0 0 0 0 0 0 0 0 0 0
39636 - 0 0 0 0 0 0 0 0 0 0 0 0
39637 - 0 0 0 0 0 0 0 0 0 0 0 0
39638 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39645 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39650 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39651 +4 4 4 4 4 4
39652 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39654 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39659 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39664 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39665 +4 4 4 4 4 4
39666 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39668 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39672 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39673 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39678 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39679 +4 4 4 4 4 4
39680 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39682 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39686 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39687 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39693 +4 4 4 4 4 4
39694 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39696 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39700 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39701 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39707 +4 4 4 4 4 4
39708 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39721 +4 4 4 4 4 4
39722 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39726 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
39727 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
39728 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39731 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
39732 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39733 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
39734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39735 +4 4 4 4 4 4
39736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39740 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
39741 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
39742 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39745 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
39746 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
39747 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
39748 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39749 +4 4 4 4 4 4
39750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39754 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
39755 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
39756 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39759 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
39760 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
39761 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
39762 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
39763 +4 4 4 4 4 4
39764 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39767 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
39768 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
39769 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
39770 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
39771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39772 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39773 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
39774 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
39775 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
39776 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
39777 +4 4 4 4 4 4
39778 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39781 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
39782 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
39783 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
39784 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
39785 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39786 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
39787 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
39788 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
39789 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
39790 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
39791 +4 4 4 4 4 4
39792 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39795 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
39796 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
39797 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
39798 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
39799 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39800 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
39801 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
39802 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
39803 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
39804 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
39805 +4 4 4 4 4 4
39806 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39808 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
39809 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
39810 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
39811 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
39812 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
39813 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
39814 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
39815 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
39816 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
39817 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
39818 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
39819 +4 4 4 4 4 4
39820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39822 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
39823 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
39824 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
39825 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
39826 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
39827 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
39828 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
39829 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
39830 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
39831 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
39832 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
39833 +4 4 4 4 4 4
39834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39836 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
39837 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
39838 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
39839 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
39840 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
39841 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
39842 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
39843 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
39844 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
39845 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
39846 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
39847 +4 4 4 4 4 4
39848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39850 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
39851 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
39852 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
39853 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
39854 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
39855 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
39856 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
39857 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
39858 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
39859 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
39860 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
39861 +4 4 4 4 4 4
39862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39863 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
39864 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
39865 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
39866 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
39867 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
39868 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
39869 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
39870 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
39871 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
39872 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
39873 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
39874 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
39875 +4 4 4 4 4 4
39876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39877 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
39878 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
39879 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
39880 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
39881 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
39882 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
39883 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
39884 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
39885 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
39886 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
39887 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
39888 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
39889 +0 0 0 4 4 4
39890 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
39891 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
39892 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
39893 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
39894 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
39895 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
39896 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
39897 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
39898 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
39899 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
39900 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
39901 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
39902 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
39903 +2 0 0 0 0 0
39904 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
39905 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
39906 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
39907 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
39908 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
39909 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
39910 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
39911 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
39912 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
39913 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
39914 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
39915 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
39916 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
39917 +37 38 37 0 0 0
39918 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39919 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
39920 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
39921 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
39922 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
39923 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
39924 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
39925 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
39926 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
39927 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
39928 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
39929 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
39930 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
39931 +85 115 134 4 0 0
39932 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
39933 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
39934 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
39935 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
39936 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
39937 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
39938 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
39939 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
39940 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
39941 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
39942 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
39943 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
39944 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
39945 +60 73 81 4 0 0
39946 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
39947 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
39948 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
39949 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
39950 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
39951 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
39952 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
39953 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
39954 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
39955 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
39956 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
39957 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
39958 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
39959 +16 19 21 4 0 0
39960 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
39961 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
39962 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
39963 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
39964 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
39965 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
39966 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
39967 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
39968 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
39969 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
39970 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
39971 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
39972 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
39973 +4 0 0 4 3 3
39974 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
39975 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
39976 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
39977 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
39978 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
39979 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
39980 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
39981 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
39982 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
39983 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
39984 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
39985 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
39986 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
39987 +3 2 2 4 4 4
39988 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
39989 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
39990 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
39991 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
39992 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
39993 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
39994 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
39995 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
39996 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
39997 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
39998 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
39999 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40000 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40001 +4 4 4 4 4 4
40002 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40003 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40004 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40005 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40006 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40007 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40008 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40009 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40010 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40011 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40012 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40013 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40014 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40015 +4 4 4 4 4 4
40016 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40017 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40018 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40019 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40020 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40021 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40022 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40023 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40024 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40025 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40026 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40027 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40028 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40029 +5 5 5 5 5 5
40030 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40031 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40032 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40033 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40034 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40035 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40036 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40037 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40038 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40039 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40040 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40041 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40042 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40043 +5 5 5 4 4 4
40044 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40045 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40046 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40047 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40048 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40049 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40050 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40051 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40052 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40053 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40054 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40055 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40057 +4 4 4 4 4 4
40058 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40059 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40060 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40061 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40062 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40063 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40064 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40065 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40066 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40067 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40068 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40069 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40071 +4 4 4 4 4 4
40072 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40073 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40074 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40075 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40076 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40077 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40078 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40079 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40080 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40081 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40082 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40085 +4 4 4 4 4 4
40086 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40087 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40088 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40089 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40090 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40091 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40092 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40093 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40094 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40095 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40096 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40099 +4 4 4 4 4 4
40100 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40101 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40102 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40103 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40104 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40105 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40106 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40107 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40108 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40109 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40110 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40113 +4 4 4 4 4 4
40114 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40115 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40116 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40117 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40118 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40119 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40120 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40121 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40122 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40123 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40124 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40127 +4 4 4 4 4 4
40128 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40129 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40130 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40131 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40132 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40133 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40134 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40135 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40136 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40137 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40138 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40141 +4 4 4 4 4 4
40142 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40143 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40144 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40145 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40146 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40147 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40148 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40149 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40150 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40151 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40152 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40155 +4 4 4 4 4 4
40156 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40157 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40158 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40159 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40160 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40161 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40162 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40163 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40164 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40165 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40166 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40169 +4 4 4 4 4 4
40170 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40171 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40172 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40173 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40174 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40175 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40176 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40177 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40178 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40179 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40180 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40183 +4 4 4 4 4 4
40184 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40185 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40186 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40187 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40188 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40189 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40190 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40191 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40192 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40193 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40194 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40197 +4 4 4 4 4 4
40198 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40199 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40200 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40201 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40202 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40203 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40204 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40205 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40206 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40207 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40208 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40211 +4 4 4 4 4 4
40212 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40213 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40214 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40215 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40216 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40217 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40218 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40219 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40220 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40221 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40222 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40225 +4 4 4 4 4 4
40226 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40227 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40228 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40229 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40230 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40231 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40232 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40233 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40234 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40235 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40236 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40239 +4 4 4 4 4 4
40240 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40241 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40242 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40243 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40244 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40245 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40246 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40247 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40248 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40249 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40250 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40253 +4 4 4 4 4 4
40254 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40255 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40256 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40257 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40258 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40259 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40260 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40261 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40262 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40263 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40264 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40267 +4 4 4 4 4 4
40268 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40269 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40270 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40271 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40272 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40273 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40274 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40275 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40276 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40277 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40278 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40281 +4 4 4 4 4 4
40282 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40283 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40284 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40285 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40286 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40287 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40288 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40289 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40290 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40291 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40292 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40295 +4 4 4 4 4 4
40296 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40297 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40298 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40299 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40300 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40301 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40302 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40303 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40304 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40305 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40306 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40309 +4 4 4 4 4 4
40310 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40311 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40312 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40313 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40314 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40315 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40316 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40317 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40318 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40319 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40320 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40323 +4 4 4 4 4 4
40324 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40325 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40326 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40327 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40328 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40329 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40330 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40331 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40332 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40333 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40334 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40337 +4 4 4 4 4 4
40338 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40339 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40340 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40341 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40342 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40343 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40344 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40345 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40346 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40347 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40348 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40351 +4 4 4 4 4 4
40352 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40353 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40354 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40355 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40356 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40357 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40358 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40359 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40360 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40361 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40362 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40365 +4 4 4 4 4 4
40366 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40367 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40368 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40369 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40370 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40371 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40372 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40373 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40374 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40375 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40376 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40379 +4 4 4 4 4 4
40380 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40381 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40382 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40383 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40384 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40385 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40386 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40387 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40388 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40389 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40390 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40393 +4 4 4 4 4 4
40394 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40395 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40396 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40397 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40398 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40399 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40400 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40401 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40402 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40403 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40404 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40407 +4 4 4 4 4 4
40408 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40409 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40410 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40411 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40412 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40413 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40414 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40415 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40416 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40417 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40418 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40421 +4 4 4 4 4 4
40422 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40423 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40424 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40425 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40426 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40427 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40428 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40429 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40430 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40431 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40432 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40433 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40435 +4 4 4 4 4 4
40436 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40437 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40438 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40439 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40440 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40441 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40442 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40443 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40444 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40445 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40446 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40449 +4 4 4 4 4 4
40450 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40451 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40452 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40453 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40454 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40455 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40456 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40457 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40458 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40459 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40460 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40461 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40463 +4 4 4 4 4 4
40464 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40465 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40466 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40467 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40468 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40469 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40470 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40471 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40472 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40473 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40474 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40475 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40477 +4 4 4 4 4 4
40478 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40479 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40480 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40481 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40482 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40483 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40484 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40485 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40486 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40487 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40488 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40489 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40491 +4 4 4 4 4 4
40492 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40493 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40494 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40495 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40496 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40497 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40498 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40499 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40500 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40501 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40502 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40503 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40505 +4 4 4 4 4 4
40506 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40507 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40508 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40509 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40510 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40511 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40512 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40513 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40514 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40515 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40516 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40517 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40519 +4 4 4 4 4 4
40520 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40521 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40522 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40523 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40524 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40525 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40526 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40527 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40528 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40529 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40530 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40533 +4 4 4 4 4 4
40534 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40535 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40536 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40537 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40538 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40539 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40540 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40541 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40542 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40543 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40544 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40547 +4 4 4 4 4 4
40548 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40549 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40550 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40551 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40552 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40553 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40554 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40555 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40556 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40557 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40558 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40561 +4 4 4 4 4 4
40562 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40563 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
40564 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
40565 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
40566 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
40567 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
40568 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
40569 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40570 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40571 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40572 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40575 +4 4 4 4 4 4
40576 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40577 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
40578 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40579 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
40580 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
40581 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
40582 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
40583 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
40584 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40585 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40589 +4 4 4 4 4 4
40590 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40591 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
40592 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
40593 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
40594 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
40595 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
40596 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
40597 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
40598 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40599 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40603 +4 4 4 4 4 4
40604 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40605 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40606 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
40607 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40608 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
40609 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
40610 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
40611 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40612 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40613 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40617 +4 4 4 4 4 4
40618 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40619 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40620 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40621 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
40622 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
40623 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
40624 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
40625 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40626 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40631 +4 4 4 4 4 4
40632 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40633 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40635 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40636 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
40637 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
40638 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
40639 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40640 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40645 +4 4 4 4 4 4
40646 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40649 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
40650 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40651 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
40652 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
40653 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40654 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40659 +4 4 4 4 4 4
40660 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40663 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40664 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40665 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
40666 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
40667 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40668 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40672 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40673 +4 4 4 4 4 4
40674 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40677 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
40678 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
40679 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
40680 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
40681 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40682 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40686 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40687 +4 4 4 4 4 4
40688 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40692 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
40693 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
40694 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40695 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40696 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40700 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40701 +4 4 4 4 4 4
40702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40706 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
40707 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
40708 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40715 +4 4 4 4 4 4
40716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40720 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
40721 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
40722 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40728 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40729 +4 4 4 4 4 4
40730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40734 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
40735 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
40736 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40742 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40743 +4 4 4 4 4 4
40744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40748 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
40749 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
40750 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40757 +4 4 4 4 4 4
40758 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
40759 index 3473e75..c930142 100644
40760 --- a/drivers/video/udlfb.c
40761 +++ b/drivers/video/udlfb.c
40762 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
40763 dlfb_urb_completion(urb);
40764
40765 error:
40766 - atomic_add(bytes_sent, &dev->bytes_sent);
40767 - atomic_add(bytes_identical, &dev->bytes_identical);
40768 - atomic_add(width*height*2, &dev->bytes_rendered);
40769 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40770 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40771 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
40772 end_cycles = get_cycles();
40773 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40774 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40775 >> 10)), /* Kcycles */
40776 &dev->cpu_kcycles_used);
40777
40778 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
40779 dlfb_urb_completion(urb);
40780
40781 error:
40782 - atomic_add(bytes_sent, &dev->bytes_sent);
40783 - atomic_add(bytes_identical, &dev->bytes_identical);
40784 - atomic_add(bytes_rendered, &dev->bytes_rendered);
40785 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
40786 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
40787 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
40788 end_cycles = get_cycles();
40789 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
40790 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
40791 >> 10)), /* Kcycles */
40792 &dev->cpu_kcycles_used);
40793 }
40794 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
40795 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40796 struct dlfb_data *dev = fb_info->par;
40797 return snprintf(buf, PAGE_SIZE, "%u\n",
40798 - atomic_read(&dev->bytes_rendered));
40799 + atomic_read_unchecked(&dev->bytes_rendered));
40800 }
40801
40802 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40803 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
40804 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40805 struct dlfb_data *dev = fb_info->par;
40806 return snprintf(buf, PAGE_SIZE, "%u\n",
40807 - atomic_read(&dev->bytes_identical));
40808 + atomic_read_unchecked(&dev->bytes_identical));
40809 }
40810
40811 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40812 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
40813 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40814 struct dlfb_data *dev = fb_info->par;
40815 return snprintf(buf, PAGE_SIZE, "%u\n",
40816 - atomic_read(&dev->bytes_sent));
40817 + atomic_read_unchecked(&dev->bytes_sent));
40818 }
40819
40820 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40821 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
40822 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40823 struct dlfb_data *dev = fb_info->par;
40824 return snprintf(buf, PAGE_SIZE, "%u\n",
40825 - atomic_read(&dev->cpu_kcycles_used));
40826 + atomic_read_unchecked(&dev->cpu_kcycles_used));
40827 }
40828
40829 static ssize_t edid_show(
40830 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
40831 struct fb_info *fb_info = dev_get_drvdata(fbdev);
40832 struct dlfb_data *dev = fb_info->par;
40833
40834 - atomic_set(&dev->bytes_rendered, 0);
40835 - atomic_set(&dev->bytes_identical, 0);
40836 - atomic_set(&dev->bytes_sent, 0);
40837 - atomic_set(&dev->cpu_kcycles_used, 0);
40838 + atomic_set_unchecked(&dev->bytes_rendered, 0);
40839 + atomic_set_unchecked(&dev->bytes_identical, 0);
40840 + atomic_set_unchecked(&dev->bytes_sent, 0);
40841 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
40842
40843 return count;
40844 }
40845 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
40846 index 7f8472c..9842e87 100644
40847 --- a/drivers/video/uvesafb.c
40848 +++ b/drivers/video/uvesafb.c
40849 @@ -19,6 +19,7 @@
40850 #include <linux/io.h>
40851 #include <linux/mutex.h>
40852 #include <linux/slab.h>
40853 +#include <linux/moduleloader.h>
40854 #include <video/edid.h>
40855 #include <video/uvesafb.h>
40856 #ifdef CONFIG_X86
40857 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
40858 NULL,
40859 };
40860
40861 - return call_usermodehelper(v86d_path, argv, envp, 1);
40862 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
40863 }
40864
40865 /*
40866 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
40867 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
40868 par->pmi_setpal = par->ypan = 0;
40869 } else {
40870 +
40871 +#ifdef CONFIG_PAX_KERNEXEC
40872 +#ifdef CONFIG_MODULES
40873 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
40874 +#endif
40875 + if (!par->pmi_code) {
40876 + par->pmi_setpal = par->ypan = 0;
40877 + return 0;
40878 + }
40879 +#endif
40880 +
40881 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
40882 + task->t.regs.edi);
40883 +
40884 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40885 + pax_open_kernel();
40886 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
40887 + pax_close_kernel();
40888 +
40889 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
40890 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
40891 +#else
40892 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
40893 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
40894 +#endif
40895 +
40896 printk(KERN_INFO "uvesafb: protected mode interface info at "
40897 "%04x:%04x\n",
40898 (u16)task->t.regs.es, (u16)task->t.regs.edi);
40899 @@ -1821,6 +1844,11 @@ out:
40900 if (par->vbe_modes)
40901 kfree(par->vbe_modes);
40902
40903 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40904 + if (par->pmi_code)
40905 + module_free_exec(NULL, par->pmi_code);
40906 +#endif
40907 +
40908 framebuffer_release(info);
40909 return err;
40910 }
40911 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
40912 kfree(par->vbe_state_orig);
40913 if (par->vbe_state_saved)
40914 kfree(par->vbe_state_saved);
40915 +
40916 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40917 + if (par->pmi_code)
40918 + module_free_exec(NULL, par->pmi_code);
40919 +#endif
40920 +
40921 }
40922
40923 framebuffer_release(info);
40924 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
40925 index 501b340..86bd4cf 100644
40926 --- a/drivers/video/vesafb.c
40927 +++ b/drivers/video/vesafb.c
40928 @@ -9,6 +9,7 @@
40929 */
40930
40931 #include <linux/module.h>
40932 +#include <linux/moduleloader.h>
40933 #include <linux/kernel.h>
40934 #include <linux/errno.h>
40935 #include <linux/string.h>
40936 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
40937 static int vram_total __initdata; /* Set total amount of memory */
40938 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
40939 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
40940 -static void (*pmi_start)(void) __read_mostly;
40941 -static void (*pmi_pal) (void) __read_mostly;
40942 +static void (*pmi_start)(void) __read_only;
40943 +static void (*pmi_pal) (void) __read_only;
40944 static int depth __read_mostly;
40945 static int vga_compat __read_mostly;
40946 /* --------------------------------------------------------------------- */
40947 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
40948 unsigned int size_vmode;
40949 unsigned int size_remap;
40950 unsigned int size_total;
40951 + void *pmi_code = NULL;
40952
40953 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
40954 return -ENODEV;
40955 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
40956 size_remap = size_total;
40957 vesafb_fix.smem_len = size_remap;
40958
40959 -#ifndef __i386__
40960 - screen_info.vesapm_seg = 0;
40961 -#endif
40962 -
40963 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
40964 printk(KERN_WARNING
40965 "vesafb: cannot reserve video memory at 0x%lx\n",
40966 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
40967 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
40968 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
40969
40970 +#ifdef __i386__
40971 +
40972 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
40973 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
40974 + if (!pmi_code)
40975 +#elif !defined(CONFIG_PAX_KERNEXEC)
40976 + if (0)
40977 +#endif
40978 +
40979 +#endif
40980 + screen_info.vesapm_seg = 0;
40981 +
40982 if (screen_info.vesapm_seg) {
40983 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
40984 - screen_info.vesapm_seg,screen_info.vesapm_off);
40985 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
40986 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
40987 }
40988
40989 if (screen_info.vesapm_seg < 0xc000)
40990 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
40991
40992 if (ypan || pmi_setpal) {
40993 unsigned short *pmi_base;
40994 +
40995 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
40996 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
40997 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
40998 +
40999 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41000 + pax_open_kernel();
41001 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41002 +#else
41003 + pmi_code = pmi_base;
41004 +#endif
41005 +
41006 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41007 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41008 +
41009 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41010 + pmi_start = ktva_ktla(pmi_start);
41011 + pmi_pal = ktva_ktla(pmi_pal);
41012 + pax_close_kernel();
41013 +#endif
41014 +
41015 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41016 if (pmi_base[3]) {
41017 printk(KERN_INFO "vesafb: pmi: ports = ");
41018 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41019 info->node, info->fix.id);
41020 return 0;
41021 err:
41022 +
41023 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41024 + module_free_exec(NULL, pmi_code);
41025 +#endif
41026 +
41027 if (info->screen_base)
41028 iounmap(info->screen_base);
41029 framebuffer_release(info);
41030 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41031 index 88714ae..16c2e11 100644
41032 --- a/drivers/video/via/via_clock.h
41033 +++ b/drivers/video/via/via_clock.h
41034 @@ -56,7 +56,7 @@ struct via_clock {
41035
41036 void (*set_engine_pll_state)(u8 state);
41037 void (*set_engine_pll)(struct via_pll_config config);
41038 -};
41039 +} __no_const;
41040
41041
41042 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41043 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41044 index e56c934..fc22f4b 100644
41045 --- a/drivers/xen/xen-pciback/conf_space.h
41046 +++ b/drivers/xen/xen-pciback/conf_space.h
41047 @@ -44,15 +44,15 @@ struct config_field {
41048 struct {
41049 conf_dword_write write;
41050 conf_dword_read read;
41051 - } dw;
41052 + } __no_const dw;
41053 struct {
41054 conf_word_write write;
41055 conf_word_read read;
41056 - } w;
41057 + } __no_const w;
41058 struct {
41059 conf_byte_write write;
41060 conf_byte_read read;
41061 - } b;
41062 + } __no_const b;
41063 } u;
41064 struct list_head list;
41065 };
41066 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41067 index 879ed88..bc03a01 100644
41068 --- a/fs/9p/vfs_inode.c
41069 +++ b/fs/9p/vfs_inode.c
41070 @@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41071 void
41072 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41073 {
41074 - char *s = nd_get_link(nd);
41075 + const char *s = nd_get_link(nd);
41076
41077 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
41078 IS_ERR(s) ? "<error>" : s);
41079 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41080 index 79e2ca7..5828ad1 100644
41081 --- a/fs/Kconfig.binfmt
41082 +++ b/fs/Kconfig.binfmt
41083 @@ -86,7 +86,7 @@ config HAVE_AOUT
41084
41085 config BINFMT_AOUT
41086 tristate "Kernel support for a.out and ECOFF binaries"
41087 - depends on HAVE_AOUT
41088 + depends on HAVE_AOUT && BROKEN
41089 ---help---
41090 A.out (Assembler.OUTput) is a set of formats for libraries and
41091 executables used in the earliest versions of UNIX. Linux used
41092 diff --git a/fs/aio.c b/fs/aio.c
41093 index 67e4b90..fbb09dc 100644
41094 --- a/fs/aio.c
41095 +++ b/fs/aio.c
41096 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41097 size += sizeof(struct io_event) * nr_events;
41098 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41099
41100 - if (nr_pages < 0)
41101 + if (nr_pages <= 0)
41102 return -EINVAL;
41103
41104 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41105 @@ -1463,22 +1463,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41106 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41107 {
41108 ssize_t ret;
41109 + struct iovec iovstack;
41110
41111 #ifdef CONFIG_COMPAT
41112 if (compat)
41113 ret = compat_rw_copy_check_uvector(type,
41114 (struct compat_iovec __user *)kiocb->ki_buf,
41115 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41116 + kiocb->ki_nbytes, 1, &iovstack,
41117 &kiocb->ki_iovec, 1);
41118 else
41119 #endif
41120 ret = rw_copy_check_uvector(type,
41121 (struct iovec __user *)kiocb->ki_buf,
41122 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41123 + kiocb->ki_nbytes, 1, &iovstack,
41124 &kiocb->ki_iovec, 1);
41125 if (ret < 0)
41126 goto out;
41127
41128 + if (kiocb->ki_iovec == &iovstack) {
41129 + kiocb->ki_inline_vec = iovstack;
41130 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41131 + }
41132 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41133 kiocb->ki_cur_seg = 0;
41134 /* ki_nbytes/left now reflect bytes instead of segs */
41135 diff --git a/fs/attr.c b/fs/attr.c
41136 index 7ee7ba4..0c61a60 100644
41137 --- a/fs/attr.c
41138 +++ b/fs/attr.c
41139 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41140 unsigned long limit;
41141
41142 limit = rlimit(RLIMIT_FSIZE);
41143 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41144 if (limit != RLIM_INFINITY && offset > limit)
41145 goto out_sig;
41146 if (offset > inode->i_sb->s_maxbytes)
41147 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41148 index 6861f61..a25f010 100644
41149 --- a/fs/autofs4/waitq.c
41150 +++ b/fs/autofs4/waitq.c
41151 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
41152 {
41153 unsigned long sigpipe, flags;
41154 mm_segment_t fs;
41155 - const char *data = (const char *)addr;
41156 + const char __user *data = (const char __force_user *)addr;
41157 ssize_t wr = 0;
41158
41159 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
41160 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41161 index 8342ca6..82fd192 100644
41162 --- a/fs/befs/linuxvfs.c
41163 +++ b/fs/befs/linuxvfs.c
41164 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41165 {
41166 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41167 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41168 - char *link = nd_get_link(nd);
41169 + const char *link = nd_get_link(nd);
41170 if (!IS_ERR(link))
41171 kfree(link);
41172 }
41173 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41174 index a6395bd..f1e376a 100644
41175 --- a/fs/binfmt_aout.c
41176 +++ b/fs/binfmt_aout.c
41177 @@ -16,6 +16,7 @@
41178 #include <linux/string.h>
41179 #include <linux/fs.h>
41180 #include <linux/file.h>
41181 +#include <linux/security.h>
41182 #include <linux/stat.h>
41183 #include <linux/fcntl.h>
41184 #include <linux/ptrace.h>
41185 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41186 #endif
41187 # define START_STACK(u) ((void __user *)u.start_stack)
41188
41189 + memset(&dump, 0, sizeof(dump));
41190 +
41191 fs = get_fs();
41192 set_fs(KERNEL_DS);
41193 has_dumped = 1;
41194 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41195
41196 /* If the size of the dump file exceeds the rlimit, then see what would happen
41197 if we wrote the stack, but not the data area. */
41198 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41199 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41200 dump.u_dsize = 0;
41201
41202 /* Make sure we have enough room to write the stack and data areas. */
41203 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41204 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41205 dump.u_ssize = 0;
41206
41207 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41208 rlim = rlimit(RLIMIT_DATA);
41209 if (rlim >= RLIM_INFINITY)
41210 rlim = ~0;
41211 +
41212 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41213 if (ex.a_data + ex.a_bss > rlim)
41214 return -ENOMEM;
41215
41216 @@ -259,9 +266,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41217 current->mm->free_area_cache = current->mm->mmap_base;
41218 current->mm->cached_hole_size = 0;
41219
41220 + retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
41221 + if (retval < 0) {
41222 + /* Someone check-me: is this error path enough? */
41223 + send_sig(SIGKILL, current, 0);
41224 + return retval;
41225 + }
41226 +
41227 install_exec_creds(bprm);
41228 current->flags &= ~PF_FORKNOEXEC;
41229
41230 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41231 + current->mm->pax_flags = 0UL;
41232 +#endif
41233 +
41234 +#ifdef CONFIG_PAX_PAGEEXEC
41235 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41236 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41237 +
41238 +#ifdef CONFIG_PAX_EMUTRAMP
41239 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41240 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41241 +#endif
41242 +
41243 +#ifdef CONFIG_PAX_MPROTECT
41244 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41245 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41246 +#endif
41247 +
41248 + }
41249 +#endif
41250 +
41251 if (N_MAGIC(ex) == OMAGIC) {
41252 unsigned long text_addr, map_size;
41253 loff_t pos;
41254 @@ -334,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41255
41256 down_write(&current->mm->mmap_sem);
41257 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41258 - PROT_READ | PROT_WRITE | PROT_EXEC,
41259 + PROT_READ | PROT_WRITE,
41260 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41261 fd_offset + ex.a_text);
41262 up_write(&current->mm->mmap_sem);
41263 @@ -352,13 +387,6 @@ beyond_if:
41264 return retval;
41265 }
41266
41267 - retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
41268 - if (retval < 0) {
41269 - /* Someone check-me: is this error path enough? */
41270 - send_sig(SIGKILL, current, 0);
41271 - return retval;
41272 - }
41273 -
41274 current->mm->start_stack =
41275 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
41276 #ifdef __alpha__
41277 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41278 index 6ff96c6..dbf63ee 100644
41279 --- a/fs/binfmt_elf.c
41280 +++ b/fs/binfmt_elf.c
41281 @@ -32,6 +32,7 @@
41282 #include <linux/elf.h>
41283 #include <linux/utsname.h>
41284 #include <linux/coredump.h>
41285 +#include <linux/xattr.h>
41286 #include <asm/uaccess.h>
41287 #include <asm/param.h>
41288 #include <asm/page.h>
41289 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41290 #define elf_core_dump NULL
41291 #endif
41292
41293 +#ifdef CONFIG_PAX_MPROTECT
41294 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41295 +#endif
41296 +
41297 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41298 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41299 #else
41300 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
41301 .load_binary = load_elf_binary,
41302 .load_shlib = load_elf_library,
41303 .core_dump = elf_core_dump,
41304 +
41305 +#ifdef CONFIG_PAX_MPROTECT
41306 + .handle_mprotect= elf_handle_mprotect,
41307 +#endif
41308 +
41309 .min_coredump = ELF_EXEC_PAGESIZE,
41310 };
41311
41312 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
41313
41314 static int set_brk(unsigned long start, unsigned long end)
41315 {
41316 + unsigned long e = end;
41317 +
41318 start = ELF_PAGEALIGN(start);
41319 end = ELF_PAGEALIGN(end);
41320 if (end > start) {
41321 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
41322 if (BAD_ADDR(addr))
41323 return addr;
41324 }
41325 - current->mm->start_brk = current->mm->brk = end;
41326 + current->mm->start_brk = current->mm->brk = e;
41327 return 0;
41328 }
41329
41330 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41331 elf_addr_t __user *u_rand_bytes;
41332 const char *k_platform = ELF_PLATFORM;
41333 const char *k_base_platform = ELF_BASE_PLATFORM;
41334 - unsigned char k_rand_bytes[16];
41335 + u32 k_rand_bytes[4];
41336 int items;
41337 elf_addr_t *elf_info;
41338 int ei_index = 0;
41339 const struct cred *cred = current_cred();
41340 struct vm_area_struct *vma;
41341 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41342
41343 /*
41344 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41345 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41346 * Generate 16 random bytes for userspace PRNG seeding.
41347 */
41348 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41349 - u_rand_bytes = (elf_addr_t __user *)
41350 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41351 + srandom32(k_rand_bytes[0] ^ random32());
41352 + srandom32(k_rand_bytes[1] ^ random32());
41353 + srandom32(k_rand_bytes[2] ^ random32());
41354 + srandom32(k_rand_bytes[3] ^ random32());
41355 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41356 + u_rand_bytes = (elf_addr_t __user *) p;
41357 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41358 return -EFAULT;
41359
41360 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41361 return -EFAULT;
41362 current->mm->env_end = p;
41363
41364 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41365 +
41366 /* Put the elf_info on the stack in the right place. */
41367 sp = (elf_addr_t __user *)envp + 1;
41368 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41369 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41370 return -EFAULT;
41371 return 0;
41372 }
41373 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41374 {
41375 struct elf_phdr *elf_phdata;
41376 struct elf_phdr *eppnt;
41377 - unsigned long load_addr = 0;
41378 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41379 int load_addr_set = 0;
41380 unsigned long last_bss = 0, elf_bss = 0;
41381 - unsigned long error = ~0UL;
41382 + unsigned long error = -EINVAL;
41383 unsigned long total_size;
41384 int retval, i, size;
41385
41386 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41387 goto out_close;
41388 }
41389
41390 +#ifdef CONFIG_PAX_SEGMEXEC
41391 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41392 + pax_task_size = SEGMEXEC_TASK_SIZE;
41393 +#endif
41394 +
41395 eppnt = elf_phdata;
41396 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41397 if (eppnt->p_type == PT_LOAD) {
41398 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41399 k = load_addr + eppnt->p_vaddr;
41400 if (BAD_ADDR(k) ||
41401 eppnt->p_filesz > eppnt->p_memsz ||
41402 - eppnt->p_memsz > TASK_SIZE ||
41403 - TASK_SIZE - eppnt->p_memsz < k) {
41404 + eppnt->p_memsz > pax_task_size ||
41405 + pax_task_size - eppnt->p_memsz < k) {
41406 error = -ENOMEM;
41407 goto out_close;
41408 }
41409 @@ -528,6 +552,351 @@ out:
41410 return error;
41411 }
41412
41413 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41414 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
41415 +{
41416 + unsigned long pax_flags = 0UL;
41417 +
41418 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41419 +
41420 +#ifdef CONFIG_PAX_PAGEEXEC
41421 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41422 + pax_flags |= MF_PAX_PAGEEXEC;
41423 +#endif
41424 +
41425 +#ifdef CONFIG_PAX_SEGMEXEC
41426 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41427 + pax_flags |= MF_PAX_SEGMEXEC;
41428 +#endif
41429 +
41430 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41431 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41432 + if ((__supported_pte_mask & _PAGE_NX))
41433 + pax_flags &= ~MF_PAX_SEGMEXEC;
41434 + else
41435 + pax_flags &= ~MF_PAX_PAGEEXEC;
41436 + }
41437 +#endif
41438 +
41439 +#ifdef CONFIG_PAX_EMUTRAMP
41440 + if (elf_phdata->p_flags & PF_EMUTRAMP)
41441 + pax_flags |= MF_PAX_EMUTRAMP;
41442 +#endif
41443 +
41444 +#ifdef CONFIG_PAX_MPROTECT
41445 + if (elf_phdata->p_flags & PF_MPROTECT)
41446 + pax_flags |= MF_PAX_MPROTECT;
41447 +#endif
41448 +
41449 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41450 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41451 + pax_flags |= MF_PAX_RANDMMAP;
41452 +#endif
41453 +
41454 +#endif
41455 +
41456 + return pax_flags;
41457 +}
41458 +
41459 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
41460 +{
41461 + unsigned long pax_flags = 0UL;
41462 +
41463 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41464 +
41465 +#ifdef CONFIG_PAX_PAGEEXEC
41466 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41467 + pax_flags |= MF_PAX_PAGEEXEC;
41468 +#endif
41469 +
41470 +#ifdef CONFIG_PAX_SEGMEXEC
41471 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41472 + pax_flags |= MF_PAX_SEGMEXEC;
41473 +#endif
41474 +
41475 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41476 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41477 + if ((__supported_pte_mask & _PAGE_NX))
41478 + pax_flags &= ~MF_PAX_SEGMEXEC;
41479 + else
41480 + pax_flags &= ~MF_PAX_PAGEEXEC;
41481 + }
41482 +#endif
41483 +
41484 +#ifdef CONFIG_PAX_EMUTRAMP
41485 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41486 + pax_flags |= MF_PAX_EMUTRAMP;
41487 +#endif
41488 +
41489 +#ifdef CONFIG_PAX_MPROTECT
41490 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41491 + pax_flags |= MF_PAX_MPROTECT;
41492 +#endif
41493 +
41494 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41495 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41496 + pax_flags |= MF_PAX_RANDMMAP;
41497 +#endif
41498 +
41499 +#endif
41500 +
41501 + return pax_flags;
41502 +}
41503 +
41504 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41505 +{
41506 + unsigned long pax_flags = 0UL;
41507 +
41508 +#ifdef CONFIG_PAX_EI_PAX
41509 +
41510 +#ifdef CONFIG_PAX_PAGEEXEC
41511 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41512 + pax_flags |= MF_PAX_PAGEEXEC;
41513 +#endif
41514 +
41515 +#ifdef CONFIG_PAX_SEGMEXEC
41516 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41517 + pax_flags |= MF_PAX_SEGMEXEC;
41518 +#endif
41519 +
41520 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41521 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41522 + if ((__supported_pte_mask & _PAGE_NX))
41523 + pax_flags &= ~MF_PAX_SEGMEXEC;
41524 + else
41525 + pax_flags &= ~MF_PAX_PAGEEXEC;
41526 + }
41527 +#endif
41528 +
41529 +#ifdef CONFIG_PAX_EMUTRAMP
41530 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41531 + pax_flags |= MF_PAX_EMUTRAMP;
41532 +#endif
41533 +
41534 +#ifdef CONFIG_PAX_MPROTECT
41535 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41536 + pax_flags |= MF_PAX_MPROTECT;
41537 +#endif
41538 +
41539 +#ifdef CONFIG_PAX_ASLR
41540 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41541 + pax_flags |= MF_PAX_RANDMMAP;
41542 +#endif
41543 +
41544 +#else
41545 +
41546 +#ifdef CONFIG_PAX_PAGEEXEC
41547 + pax_flags |= MF_PAX_PAGEEXEC;
41548 +#endif
41549 +
41550 +#ifdef CONFIG_PAX_MPROTECT
41551 + pax_flags |= MF_PAX_MPROTECT;
41552 +#endif
41553 +
41554 +#ifdef CONFIG_PAX_RANDMMAP
41555 + pax_flags |= MF_PAX_RANDMMAP;
41556 +#endif
41557 +
41558 +#ifdef CONFIG_PAX_SEGMEXEC
41559 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
41560 + pax_flags &= ~MF_PAX_PAGEEXEC;
41561 + pax_flags |= MF_PAX_SEGMEXEC;
41562 + }
41563 +#endif
41564 +
41565 +#endif
41566 +
41567 + return pax_flags;
41568 +}
41569 +
41570 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41571 +{
41572 +
41573 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41574 + unsigned long i;
41575 +
41576 + for (i = 0UL; i < elf_ex->e_phnum; i++)
41577 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41578 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41579 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41580 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41581 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41582 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41583 + return ~0UL;
41584 +
41585 +#ifdef CONFIG_PAX_SOFTMODE
41586 + if (pax_softmode)
41587 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
41588 + else
41589 +#endif
41590 +
41591 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
41592 + break;
41593 + }
41594 +#endif
41595 +
41596 + return ~0UL;
41597 +}
41598 +
41599 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41600 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
41601 +{
41602 + unsigned long pax_flags = 0UL;
41603 +
41604 +#ifdef CONFIG_PAX_PAGEEXEC
41605 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
41606 + pax_flags |= MF_PAX_PAGEEXEC;
41607 +#endif
41608 +
41609 +#ifdef CONFIG_PAX_SEGMEXEC
41610 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
41611 + pax_flags |= MF_PAX_SEGMEXEC;
41612 +#endif
41613 +
41614 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41615 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41616 + if ((__supported_pte_mask & _PAGE_NX))
41617 + pax_flags &= ~MF_PAX_SEGMEXEC;
41618 + else
41619 + pax_flags &= ~MF_PAX_PAGEEXEC;
41620 + }
41621 +#endif
41622 +
41623 +#ifdef CONFIG_PAX_EMUTRAMP
41624 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
41625 + pax_flags |= MF_PAX_EMUTRAMP;
41626 +#endif
41627 +
41628 +#ifdef CONFIG_PAX_MPROTECT
41629 + if (pax_flags_softmode & MF_PAX_MPROTECT)
41630 + pax_flags |= MF_PAX_MPROTECT;
41631 +#endif
41632 +
41633 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41634 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
41635 + pax_flags |= MF_PAX_RANDMMAP;
41636 +#endif
41637 +
41638 + return pax_flags;
41639 +}
41640 +
41641 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
41642 +{
41643 + unsigned long pax_flags = 0UL;
41644 +
41645 +#ifdef CONFIG_PAX_PAGEEXEC
41646 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
41647 + pax_flags |= MF_PAX_PAGEEXEC;
41648 +#endif
41649 +
41650 +#ifdef CONFIG_PAX_SEGMEXEC
41651 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
41652 + pax_flags |= MF_PAX_SEGMEXEC;
41653 +#endif
41654 +
41655 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41656 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41657 + if ((__supported_pte_mask & _PAGE_NX))
41658 + pax_flags &= ~MF_PAX_SEGMEXEC;
41659 + else
41660 + pax_flags &= ~MF_PAX_PAGEEXEC;
41661 + }
41662 +#endif
41663 +
41664 +#ifdef CONFIG_PAX_EMUTRAMP
41665 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
41666 + pax_flags |= MF_PAX_EMUTRAMP;
41667 +#endif
41668 +
41669 +#ifdef CONFIG_PAX_MPROTECT
41670 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
41671 + pax_flags |= MF_PAX_MPROTECT;
41672 +#endif
41673 +
41674 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41675 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
41676 + pax_flags |= MF_PAX_RANDMMAP;
41677 +#endif
41678 +
41679 + return pax_flags;
41680 +}
41681 +#endif
41682 +
41683 +static unsigned long pax_parse_xattr_pax(struct file * const file)
41684 +{
41685 +
41686 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
41687 + ssize_t xattr_size, i;
41688 + unsigned char xattr_value[5];
41689 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
41690 +
41691 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
41692 + if (xattr_size <= 0)
41693 + return ~0UL;
41694 +
41695 + for (i = 0; i < xattr_size; i++)
41696 + switch (xattr_value[i]) {
41697 + default:
41698 + return ~0UL;
41699 +
41700 +#define parse_flag(option1, option2, flag) \
41701 + case option1: \
41702 + pax_flags_hardmode |= MF_PAX_##flag; \
41703 + break; \
41704 + case option2: \
41705 + pax_flags_softmode |= MF_PAX_##flag; \
41706 + break;
41707 +
41708 + parse_flag('p', 'P', PAGEEXEC);
41709 + parse_flag('e', 'E', EMUTRAMP);
41710 + parse_flag('m', 'M', MPROTECT);
41711 + parse_flag('r', 'R', RANDMMAP);
41712 + parse_flag('s', 'S', SEGMEXEC);
41713 +
41714 +#undef parse_flag
41715 + }
41716 +
41717 + if (pax_flags_hardmode & pax_flags_softmode)
41718 + return ~0UL;
41719 +
41720 +#ifdef CONFIG_PAX_SOFTMODE
41721 + if (pax_softmode)
41722 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
41723 + else
41724 +#endif
41725 +
41726 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
41727 +#else
41728 + return ~0UL;
41729 +#endif
41730 +
41731 +}
41732 +
41733 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
41734 +{
41735 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
41736 +
41737 + pax_flags = pax_parse_ei_pax(elf_ex);
41738 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
41739 + xattr_pax_flags = pax_parse_xattr_pax(file);
41740 +
41741 + if (pt_pax_flags == ~0UL)
41742 + pt_pax_flags = xattr_pax_flags;
41743 + else if (xattr_pax_flags == ~0UL)
41744 + xattr_pax_flags = pt_pax_flags;
41745 + if (pt_pax_flags != xattr_pax_flags)
41746 + return -EINVAL;
41747 + if (pt_pax_flags != ~0UL)
41748 + pax_flags = pt_pax_flags;
41749 +
41750 + if (0 > pax_check_flags(&pax_flags))
41751 + return -EINVAL;
41752 +
41753 + current->mm->pax_flags = pax_flags;
41754 + return 0;
41755 +}
41756 +#endif
41757 +
41758 /*
41759 * These are the functions used to load ELF style executables and shared
41760 * libraries. There is no binary dependent code anywhere else.
41761 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
41762 {
41763 unsigned int random_variable = 0;
41764
41765 +#ifdef CONFIG_PAX_RANDUSTACK
41766 + if (randomize_va_space)
41767 + return stack_top - current->mm->delta_stack;
41768 +#endif
41769 +
41770 if ((current->flags & PF_RANDOMIZE) &&
41771 !(current->personality & ADDR_NO_RANDOMIZE)) {
41772 random_variable = get_random_int() & STACK_RND_MASK;
41773 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41774 unsigned long load_addr = 0, load_bias = 0;
41775 int load_addr_set = 0;
41776 char * elf_interpreter = NULL;
41777 - unsigned long error;
41778 + unsigned long error = 0;
41779 struct elf_phdr *elf_ppnt, *elf_phdata;
41780 unsigned long elf_bss, elf_brk;
41781 int retval, i;
41782 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41783 unsigned long start_code, end_code, start_data, end_data;
41784 unsigned long reloc_func_desc __maybe_unused = 0;
41785 int executable_stack = EXSTACK_DEFAULT;
41786 - unsigned long def_flags = 0;
41787 struct {
41788 struct elfhdr elf_ex;
41789 struct elfhdr interp_elf_ex;
41790 } *loc;
41791 + unsigned long pax_task_size = TASK_SIZE;
41792
41793 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
41794 if (!loc) {
41795 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41796
41797 /* OK, This is the point of no return */
41798 current->flags &= ~PF_FORKNOEXEC;
41799 - current->mm->def_flags = def_flags;
41800 +
41801 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41802 + current->mm->pax_flags = 0UL;
41803 +#endif
41804 +
41805 +#ifdef CONFIG_PAX_DLRESOLVE
41806 + current->mm->call_dl_resolve = 0UL;
41807 +#endif
41808 +
41809 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
41810 + current->mm->call_syscall = 0UL;
41811 +#endif
41812 +
41813 +#ifdef CONFIG_PAX_ASLR
41814 + current->mm->delta_mmap = 0UL;
41815 + current->mm->delta_stack = 0UL;
41816 +#endif
41817 +
41818 + current->mm->def_flags = 0;
41819 +
41820 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
41821 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
41822 + send_sig(SIGKILL, current, 0);
41823 + goto out_free_dentry;
41824 + }
41825 +#endif
41826 +
41827 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
41828 + pax_set_initial_flags(bprm);
41829 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
41830 + if (pax_set_initial_flags_func)
41831 + (pax_set_initial_flags_func)(bprm);
41832 +#endif
41833 +
41834 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
41835 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
41836 + current->mm->context.user_cs_limit = PAGE_SIZE;
41837 + current->mm->def_flags |= VM_PAGEEXEC;
41838 + }
41839 +#endif
41840 +
41841 +#ifdef CONFIG_PAX_SEGMEXEC
41842 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
41843 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
41844 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
41845 + pax_task_size = SEGMEXEC_TASK_SIZE;
41846 + current->mm->def_flags |= VM_NOHUGEPAGE;
41847 + }
41848 +#endif
41849 +
41850 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
41851 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41852 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
41853 + put_cpu();
41854 + }
41855 +#endif
41856
41857 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
41858 may depend on the personality. */
41859 SET_PERSONALITY(loc->elf_ex);
41860 +
41861 +#ifdef CONFIG_PAX_ASLR
41862 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
41863 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
41864 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
41865 + }
41866 +#endif
41867 +
41868 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41869 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41870 + executable_stack = EXSTACK_DISABLE_X;
41871 + current->personality &= ~READ_IMPLIES_EXEC;
41872 + } else
41873 +#endif
41874 +
41875 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
41876 current->personality |= READ_IMPLIES_EXEC;
41877
41878 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41879 #else
41880 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
41881 #endif
41882 +
41883 +#ifdef CONFIG_PAX_RANDMMAP
41884 + /* PaX: randomize base address at the default exe base if requested */
41885 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
41886 +#ifdef CONFIG_SPARC64
41887 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
41888 +#else
41889 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
41890 +#endif
41891 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
41892 + elf_flags |= MAP_FIXED;
41893 + }
41894 +#endif
41895 +
41896 }
41897
41898 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
41899 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41900 * allowed task size. Note that p_filesz must always be
41901 * <= p_memsz so it is only necessary to check p_memsz.
41902 */
41903 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41904 - elf_ppnt->p_memsz > TASK_SIZE ||
41905 - TASK_SIZE - elf_ppnt->p_memsz < k) {
41906 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
41907 + elf_ppnt->p_memsz > pax_task_size ||
41908 + pax_task_size - elf_ppnt->p_memsz < k) {
41909 /* set_brk can never work. Avoid overflows. */
41910 send_sig(SIGKILL, current, 0);
41911 retval = -EINVAL;
41912 @@ -870,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41913 start_data += load_bias;
41914 end_data += load_bias;
41915
41916 +#ifdef CONFIG_PAX_RANDMMAP
41917 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
41918 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
41919 +#endif
41920 +
41921 /* Calling set_brk effectively mmaps the pages that we need
41922 * for the bss and break sections. We must do this before
41923 * mapping in the interpreter, to make sure it doesn't wind
41924 @@ -881,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
41925 goto out_free_dentry;
41926 }
41927 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
41928 - send_sig(SIGSEGV, current, 0);
41929 - retval = -EFAULT; /* Nobody gets to see this, but.. */
41930 - goto out_free_dentry;
41931 + /*
41932 + * This bss-zeroing can fail if the ELF
41933 + * file specifies odd protections. So
41934 + * we don't check the return value
41935 + */
41936 }
41937
41938 if (elf_interpreter) {
41939 @@ -1098,7 +1563,7 @@ out:
41940 * Decide what to dump of a segment, part, all or none.
41941 */
41942 static unsigned long vma_dump_size(struct vm_area_struct *vma,
41943 - unsigned long mm_flags)
41944 + unsigned long mm_flags, long signr)
41945 {
41946 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
41947
41948 @@ -1132,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
41949 if (vma->vm_file == NULL)
41950 return 0;
41951
41952 - if (FILTER(MAPPED_PRIVATE))
41953 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
41954 goto whole;
41955
41956 /*
41957 @@ -1354,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
41958 {
41959 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
41960 int i = 0;
41961 - do
41962 + do {
41963 i += 2;
41964 - while (auxv[i - 2] != AT_NULL);
41965 + } while (auxv[i - 2] != AT_NULL);
41966 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
41967 }
41968
41969 @@ -1862,14 +2327,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
41970 }
41971
41972 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
41973 - unsigned long mm_flags)
41974 + struct coredump_params *cprm)
41975 {
41976 struct vm_area_struct *vma;
41977 size_t size = 0;
41978
41979 for (vma = first_vma(current, gate_vma); vma != NULL;
41980 vma = next_vma(vma, gate_vma))
41981 - size += vma_dump_size(vma, mm_flags);
41982 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
41983 return size;
41984 }
41985
41986 @@ -1963,7 +2428,7 @@ static int elf_core_dump(struct coredump_params *cprm)
41987
41988 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
41989
41990 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
41991 + offset += elf_core_vma_data_size(gate_vma, cprm);
41992 offset += elf_core_extra_data_size();
41993 e_shoff = offset;
41994
41995 @@ -1977,10 +2442,12 @@ static int elf_core_dump(struct coredump_params *cprm)
41996 offset = dataoff;
41997
41998 size += sizeof(*elf);
41999 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42000 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42001 goto end_coredump;
42002
42003 size += sizeof(*phdr4note);
42004 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42005 if (size > cprm->limit
42006 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42007 goto end_coredump;
42008 @@ -1994,7 +2461,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42009 phdr.p_offset = offset;
42010 phdr.p_vaddr = vma->vm_start;
42011 phdr.p_paddr = 0;
42012 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42013 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42014 phdr.p_memsz = vma->vm_end - vma->vm_start;
42015 offset += phdr.p_filesz;
42016 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42017 @@ -2005,6 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42018 phdr.p_align = ELF_EXEC_PAGESIZE;
42019
42020 size += sizeof(phdr);
42021 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42022 if (size > cprm->limit
42023 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42024 goto end_coredump;
42025 @@ -2029,7 +2497,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42026 unsigned long addr;
42027 unsigned long end;
42028
42029 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42030 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42031
42032 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42033 struct page *page;
42034 @@ -2038,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42035 page = get_dump_page(addr);
42036 if (page) {
42037 void *kaddr = kmap(page);
42038 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42039 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42040 !dump_write(cprm->file, kaddr,
42041 PAGE_SIZE);
42042 @@ -2055,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42043
42044 if (e_phnum == PN_XNUM) {
42045 size += sizeof(*shdr4extnum);
42046 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42047 if (size > cprm->limit
42048 || !dump_write(cprm->file, shdr4extnum,
42049 sizeof(*shdr4extnum)))
42050 @@ -2075,6 +2545,97 @@ out:
42051
42052 #endif /* CONFIG_ELF_CORE */
42053
42054 +#ifdef CONFIG_PAX_MPROTECT
42055 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42056 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42057 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42058 + *
42059 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42060 + * basis because we want to allow the common case and not the special ones.
42061 + */
42062 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42063 +{
42064 + struct elfhdr elf_h;
42065 + struct elf_phdr elf_p;
42066 + unsigned long i;
42067 + unsigned long oldflags;
42068 + bool is_textrel_rw, is_textrel_rx, is_relro;
42069 +
42070 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42071 + return;
42072 +
42073 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42074 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42075 +
42076 +#ifdef CONFIG_PAX_ELFRELOCS
42077 + /* possible TEXTREL */
42078 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42079 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42080 +#else
42081 + is_textrel_rw = false;
42082 + is_textrel_rx = false;
42083 +#endif
42084 +
42085 + /* possible RELRO */
42086 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42087 +
42088 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42089 + return;
42090 +
42091 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42092 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42093 +
42094 +#ifdef CONFIG_PAX_ETEXECRELOCS
42095 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42096 +#else
42097 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42098 +#endif
42099 +
42100 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42101 + !elf_check_arch(&elf_h) ||
42102 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42103 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42104 + return;
42105 +
42106 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42107 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42108 + return;
42109 + switch (elf_p.p_type) {
42110 + case PT_DYNAMIC:
42111 + if (!is_textrel_rw && !is_textrel_rx)
42112 + continue;
42113 + i = 0UL;
42114 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42115 + elf_dyn dyn;
42116 +
42117 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42118 + return;
42119 + if (dyn.d_tag == DT_NULL)
42120 + return;
42121 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42122 + gr_log_textrel(vma);
42123 + if (is_textrel_rw)
42124 + vma->vm_flags |= VM_MAYWRITE;
42125 + else
42126 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42127 + vma->vm_flags &= ~VM_MAYWRITE;
42128 + return;
42129 + }
42130 + i++;
42131 + }
42132 + return;
42133 +
42134 + case PT_GNU_RELRO:
42135 + if (!is_relro)
42136 + continue;
42137 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42138 + vma->vm_flags &= ~VM_MAYWRITE;
42139 + return;
42140 + }
42141 + }
42142 +}
42143 +#endif
42144 +
42145 static int __init init_elf_binfmt(void)
42146 {
42147 return register_binfmt(&elf_format);
42148 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42149 index 1bffbe0..c8c283e 100644
42150 --- a/fs/binfmt_flat.c
42151 +++ b/fs/binfmt_flat.c
42152 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42153 realdatastart = (unsigned long) -ENOMEM;
42154 printk("Unable to allocate RAM for process data, errno %d\n",
42155 (int)-realdatastart);
42156 + down_write(&current->mm->mmap_sem);
42157 do_munmap(current->mm, textpos, text_len);
42158 + up_write(&current->mm->mmap_sem);
42159 ret = realdatastart;
42160 goto err;
42161 }
42162 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42163 }
42164 if (IS_ERR_VALUE(result)) {
42165 printk("Unable to read data+bss, errno %d\n", (int)-result);
42166 + down_write(&current->mm->mmap_sem);
42167 do_munmap(current->mm, textpos, text_len);
42168 do_munmap(current->mm, realdatastart, len);
42169 + up_write(&current->mm->mmap_sem);
42170 ret = result;
42171 goto err;
42172 }
42173 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42174 }
42175 if (IS_ERR_VALUE(result)) {
42176 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42177 + down_write(&current->mm->mmap_sem);
42178 do_munmap(current->mm, textpos, text_len + data_len + extra +
42179 MAX_SHARED_LIBS * sizeof(unsigned long));
42180 + up_write(&current->mm->mmap_sem);
42181 ret = result;
42182 goto err;
42183 }
42184 diff --git a/fs/bio.c b/fs/bio.c
42185 index b1fe82c..84da0a9 100644
42186 --- a/fs/bio.c
42187 +++ b/fs/bio.c
42188 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42189 const int read = bio_data_dir(bio) == READ;
42190 struct bio_map_data *bmd = bio->bi_private;
42191 int i;
42192 - char *p = bmd->sgvecs[0].iov_base;
42193 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42194
42195 __bio_for_each_segment(bvec, bio, i, 0) {
42196 char *addr = page_address(bvec->bv_page);
42197 diff --git a/fs/block_dev.c b/fs/block_dev.c
42198 index b07f1da..9efcb92 100644
42199 --- a/fs/block_dev.c
42200 +++ b/fs/block_dev.c
42201 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42202 else if (bdev->bd_contains == bdev)
42203 return true; /* is a whole device which isn't held */
42204
42205 - else if (whole->bd_holder == bd_may_claim)
42206 + else if (whole->bd_holder == (void *)bd_may_claim)
42207 return true; /* is a partition of a device that is being partitioned */
42208 else if (whole->bd_holder != NULL)
42209 return false; /* is a partition of a held device */
42210 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42211 index dede441..f2a2507 100644
42212 --- a/fs/btrfs/ctree.c
42213 +++ b/fs/btrfs/ctree.c
42214 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42215 free_extent_buffer(buf);
42216 add_root_to_dirty_list(root);
42217 } else {
42218 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42219 - parent_start = parent->start;
42220 - else
42221 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42222 + if (parent)
42223 + parent_start = parent->start;
42224 + else
42225 + parent_start = 0;
42226 + } else
42227 parent_start = 0;
42228
42229 WARN_ON(trans->transid != btrfs_header_generation(parent));
42230 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42231 index fd1a06d..6e9033d 100644
42232 --- a/fs/btrfs/inode.c
42233 +++ b/fs/btrfs/inode.c
42234 @@ -6895,7 +6895,7 @@ fail:
42235 return -ENOMEM;
42236 }
42237
42238 -static int btrfs_getattr(struct vfsmount *mnt,
42239 +int btrfs_getattr(struct vfsmount *mnt,
42240 struct dentry *dentry, struct kstat *stat)
42241 {
42242 struct inode *inode = dentry->d_inode;
42243 @@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42244 return 0;
42245 }
42246
42247 +EXPORT_SYMBOL(btrfs_getattr);
42248 +
42249 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42250 +{
42251 + return BTRFS_I(inode)->root->anon_dev;
42252 +}
42253 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42254 +
42255 /*
42256 * If a file is moved, it will inherit the cow and compression flags of the new
42257 * directory.
42258 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42259 index c04f02c..f5c9e2e 100644
42260 --- a/fs/btrfs/ioctl.c
42261 +++ b/fs/btrfs/ioctl.c
42262 @@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42263 for (i = 0; i < num_types; i++) {
42264 struct btrfs_space_info *tmp;
42265
42266 + /* Don't copy in more than we allocated */
42267 if (!slot_count)
42268 break;
42269
42270 + slot_count--;
42271 +
42272 info = NULL;
42273 rcu_read_lock();
42274 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42275 @@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42276 memcpy(dest, &space, sizeof(space));
42277 dest++;
42278 space_args.total_spaces++;
42279 - slot_count--;
42280 }
42281 - if (!slot_count)
42282 - break;
42283 }
42284 up_read(&info->groups_sem);
42285 }
42286
42287 - user_dest = (struct btrfs_ioctl_space_info *)
42288 + user_dest = (struct btrfs_ioctl_space_info __user *)
42289 (arg + sizeof(struct btrfs_ioctl_space_args));
42290
42291 if (copy_to_user(user_dest, dest_orig, alloc_size))
42292 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42293 index cfb5543..1ae7347 100644
42294 --- a/fs/btrfs/relocation.c
42295 +++ b/fs/btrfs/relocation.c
42296 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42297 }
42298 spin_unlock(&rc->reloc_root_tree.lock);
42299
42300 - BUG_ON((struct btrfs_root *)node->data != root);
42301 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42302
42303 if (!del) {
42304 spin_lock(&rc->reloc_root_tree.lock);
42305 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42306 index 622f469..e8d2d55 100644
42307 --- a/fs/cachefiles/bind.c
42308 +++ b/fs/cachefiles/bind.c
42309 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42310 args);
42311
42312 /* start by checking things over */
42313 - ASSERT(cache->fstop_percent >= 0 &&
42314 - cache->fstop_percent < cache->fcull_percent &&
42315 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42316 cache->fcull_percent < cache->frun_percent &&
42317 cache->frun_percent < 100);
42318
42319 - ASSERT(cache->bstop_percent >= 0 &&
42320 - cache->bstop_percent < cache->bcull_percent &&
42321 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42322 cache->bcull_percent < cache->brun_percent &&
42323 cache->brun_percent < 100);
42324
42325 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42326 index 0a1467b..6a53245 100644
42327 --- a/fs/cachefiles/daemon.c
42328 +++ b/fs/cachefiles/daemon.c
42329 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42330 if (n > buflen)
42331 return -EMSGSIZE;
42332
42333 - if (copy_to_user(_buffer, buffer, n) != 0)
42334 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42335 return -EFAULT;
42336
42337 return n;
42338 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42339 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42340 return -EIO;
42341
42342 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42343 + if (datalen > PAGE_SIZE - 1)
42344 return -EOPNOTSUPP;
42345
42346 /* drag the command string into the kernel so we can parse it */
42347 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42348 if (args[0] != '%' || args[1] != '\0')
42349 return -EINVAL;
42350
42351 - if (fstop < 0 || fstop >= cache->fcull_percent)
42352 + if (fstop >= cache->fcull_percent)
42353 return cachefiles_daemon_range_error(cache, args);
42354
42355 cache->fstop_percent = fstop;
42356 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42357 if (args[0] != '%' || args[1] != '\0')
42358 return -EINVAL;
42359
42360 - if (bstop < 0 || bstop >= cache->bcull_percent)
42361 + if (bstop >= cache->bcull_percent)
42362 return cachefiles_daemon_range_error(cache, args);
42363
42364 cache->bstop_percent = bstop;
42365 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42366 index bd6bc1b..b627b53 100644
42367 --- a/fs/cachefiles/internal.h
42368 +++ b/fs/cachefiles/internal.h
42369 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42370 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42371 struct rb_root active_nodes; /* active nodes (can't be culled) */
42372 rwlock_t active_lock; /* lock for active_nodes */
42373 - atomic_t gravecounter; /* graveyard uniquifier */
42374 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42375 unsigned frun_percent; /* when to stop culling (% files) */
42376 unsigned fcull_percent; /* when to start culling (% files) */
42377 unsigned fstop_percent; /* when to stop allocating (% files) */
42378 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42379 * proc.c
42380 */
42381 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42382 -extern atomic_t cachefiles_lookup_histogram[HZ];
42383 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42384 -extern atomic_t cachefiles_create_histogram[HZ];
42385 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42386 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42387 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42388
42389 extern int __init cachefiles_proc_init(void);
42390 extern void cachefiles_proc_cleanup(void);
42391 static inline
42392 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42393 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42394 {
42395 unsigned long jif = jiffies - start_jif;
42396 if (jif >= HZ)
42397 jif = HZ - 1;
42398 - atomic_inc(&histogram[jif]);
42399 + atomic_inc_unchecked(&histogram[jif]);
42400 }
42401
42402 #else
42403 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42404 index a0358c2..d6137f2 100644
42405 --- a/fs/cachefiles/namei.c
42406 +++ b/fs/cachefiles/namei.c
42407 @@ -318,7 +318,7 @@ try_again:
42408 /* first step is to make up a grave dentry in the graveyard */
42409 sprintf(nbuffer, "%08x%08x",
42410 (uint32_t) get_seconds(),
42411 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42412 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42413
42414 /* do the multiway lock magic */
42415 trap = lock_rename(cache->graveyard, dir);
42416 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42417 index eccd339..4c1d995 100644
42418 --- a/fs/cachefiles/proc.c
42419 +++ b/fs/cachefiles/proc.c
42420 @@ -14,9 +14,9 @@
42421 #include <linux/seq_file.h>
42422 #include "internal.h"
42423
42424 -atomic_t cachefiles_lookup_histogram[HZ];
42425 -atomic_t cachefiles_mkdir_histogram[HZ];
42426 -atomic_t cachefiles_create_histogram[HZ];
42427 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42428 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42429 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42430
42431 /*
42432 * display the latency histogram
42433 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42434 return 0;
42435 default:
42436 index = (unsigned long) v - 3;
42437 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42438 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42439 - z = atomic_read(&cachefiles_create_histogram[index]);
42440 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42441 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42442 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42443 if (x == 0 && y == 0 && z == 0)
42444 return 0;
42445
42446 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42447 index 0e3c092..818480e 100644
42448 --- a/fs/cachefiles/rdwr.c
42449 +++ b/fs/cachefiles/rdwr.c
42450 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42451 old_fs = get_fs();
42452 set_fs(KERNEL_DS);
42453 ret = file->f_op->write(
42454 - file, (const void __user *) data, len, &pos);
42455 + file, (const void __force_user *) data, len, &pos);
42456 set_fs(old_fs);
42457 kunmap(page);
42458 if (ret != len)
42459 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42460 index 9895400..fa40a7d 100644
42461 --- a/fs/ceph/dir.c
42462 +++ b/fs/ceph/dir.c
42463 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42464 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42465 struct ceph_mds_client *mdsc = fsc->mdsc;
42466 unsigned frag = fpos_frag(filp->f_pos);
42467 - int off = fpos_off(filp->f_pos);
42468 + unsigned int off = fpos_off(filp->f_pos);
42469 int err;
42470 u32 ftype;
42471 struct ceph_mds_reply_info_parsed *rinfo;
42472 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42473 index 84e8c07..6170d31 100644
42474 --- a/fs/cifs/cifs_debug.c
42475 +++ b/fs/cifs/cifs_debug.c
42476 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42477
42478 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42479 #ifdef CONFIG_CIFS_STATS2
42480 - atomic_set(&totBufAllocCount, 0);
42481 - atomic_set(&totSmBufAllocCount, 0);
42482 + atomic_set_unchecked(&totBufAllocCount, 0);
42483 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42484 #endif /* CONFIG_CIFS_STATS2 */
42485 spin_lock(&cifs_tcp_ses_lock);
42486 list_for_each(tmp1, &cifs_tcp_ses_list) {
42487 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42488 tcon = list_entry(tmp3,
42489 struct cifs_tcon,
42490 tcon_list);
42491 - atomic_set(&tcon->num_smbs_sent, 0);
42492 - atomic_set(&tcon->num_writes, 0);
42493 - atomic_set(&tcon->num_reads, 0);
42494 - atomic_set(&tcon->num_oplock_brks, 0);
42495 - atomic_set(&tcon->num_opens, 0);
42496 - atomic_set(&tcon->num_posixopens, 0);
42497 - atomic_set(&tcon->num_posixmkdirs, 0);
42498 - atomic_set(&tcon->num_closes, 0);
42499 - atomic_set(&tcon->num_deletes, 0);
42500 - atomic_set(&tcon->num_mkdirs, 0);
42501 - atomic_set(&tcon->num_rmdirs, 0);
42502 - atomic_set(&tcon->num_renames, 0);
42503 - atomic_set(&tcon->num_t2renames, 0);
42504 - atomic_set(&tcon->num_ffirst, 0);
42505 - atomic_set(&tcon->num_fnext, 0);
42506 - atomic_set(&tcon->num_fclose, 0);
42507 - atomic_set(&tcon->num_hardlinks, 0);
42508 - atomic_set(&tcon->num_symlinks, 0);
42509 - atomic_set(&tcon->num_locks, 0);
42510 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42511 + atomic_set_unchecked(&tcon->num_writes, 0);
42512 + atomic_set_unchecked(&tcon->num_reads, 0);
42513 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42514 + atomic_set_unchecked(&tcon->num_opens, 0);
42515 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42516 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42517 + atomic_set_unchecked(&tcon->num_closes, 0);
42518 + atomic_set_unchecked(&tcon->num_deletes, 0);
42519 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42520 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42521 + atomic_set_unchecked(&tcon->num_renames, 0);
42522 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42523 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42524 + atomic_set_unchecked(&tcon->num_fnext, 0);
42525 + atomic_set_unchecked(&tcon->num_fclose, 0);
42526 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42527 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42528 + atomic_set_unchecked(&tcon->num_locks, 0);
42529 }
42530 }
42531 }
42532 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42533 smBufAllocCount.counter, cifs_min_small);
42534 #ifdef CONFIG_CIFS_STATS2
42535 seq_printf(m, "Total Large %d Small %d Allocations\n",
42536 - atomic_read(&totBufAllocCount),
42537 - atomic_read(&totSmBufAllocCount));
42538 + atomic_read_unchecked(&totBufAllocCount),
42539 + atomic_read_unchecked(&totSmBufAllocCount));
42540 #endif /* CONFIG_CIFS_STATS2 */
42541
42542 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42543 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42544 if (tcon->need_reconnect)
42545 seq_puts(m, "\tDISCONNECTED ");
42546 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42547 - atomic_read(&tcon->num_smbs_sent),
42548 - atomic_read(&tcon->num_oplock_brks));
42549 + atomic_read_unchecked(&tcon->num_smbs_sent),
42550 + atomic_read_unchecked(&tcon->num_oplock_brks));
42551 seq_printf(m, "\nReads: %d Bytes: %lld",
42552 - atomic_read(&tcon->num_reads),
42553 + atomic_read_unchecked(&tcon->num_reads),
42554 (long long)(tcon->bytes_read));
42555 seq_printf(m, "\nWrites: %d Bytes: %lld",
42556 - atomic_read(&tcon->num_writes),
42557 + atomic_read_unchecked(&tcon->num_writes),
42558 (long long)(tcon->bytes_written));
42559 seq_printf(m, "\nFlushes: %d",
42560 - atomic_read(&tcon->num_flushes));
42561 + atomic_read_unchecked(&tcon->num_flushes));
42562 seq_printf(m, "\nLocks: %d HardLinks: %d "
42563 "Symlinks: %d",
42564 - atomic_read(&tcon->num_locks),
42565 - atomic_read(&tcon->num_hardlinks),
42566 - atomic_read(&tcon->num_symlinks));
42567 + atomic_read_unchecked(&tcon->num_locks),
42568 + atomic_read_unchecked(&tcon->num_hardlinks),
42569 + atomic_read_unchecked(&tcon->num_symlinks));
42570 seq_printf(m, "\nOpens: %d Closes: %d "
42571 "Deletes: %d",
42572 - atomic_read(&tcon->num_opens),
42573 - atomic_read(&tcon->num_closes),
42574 - atomic_read(&tcon->num_deletes));
42575 + atomic_read_unchecked(&tcon->num_opens),
42576 + atomic_read_unchecked(&tcon->num_closes),
42577 + atomic_read_unchecked(&tcon->num_deletes));
42578 seq_printf(m, "\nPosix Opens: %d "
42579 "Posix Mkdirs: %d",
42580 - atomic_read(&tcon->num_posixopens),
42581 - atomic_read(&tcon->num_posixmkdirs));
42582 + atomic_read_unchecked(&tcon->num_posixopens),
42583 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42584 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42585 - atomic_read(&tcon->num_mkdirs),
42586 - atomic_read(&tcon->num_rmdirs));
42587 + atomic_read_unchecked(&tcon->num_mkdirs),
42588 + atomic_read_unchecked(&tcon->num_rmdirs));
42589 seq_printf(m, "\nRenames: %d T2 Renames %d",
42590 - atomic_read(&tcon->num_renames),
42591 - atomic_read(&tcon->num_t2renames));
42592 + atomic_read_unchecked(&tcon->num_renames),
42593 + atomic_read_unchecked(&tcon->num_t2renames));
42594 seq_printf(m, "\nFindFirst: %d FNext %d "
42595 "FClose %d",
42596 - atomic_read(&tcon->num_ffirst),
42597 - atomic_read(&tcon->num_fnext),
42598 - atomic_read(&tcon->num_fclose));
42599 + atomic_read_unchecked(&tcon->num_ffirst),
42600 + atomic_read_unchecked(&tcon->num_fnext),
42601 + atomic_read_unchecked(&tcon->num_fclose));
42602 }
42603 }
42604 }
42605 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42606 index 8f1fe32..38f9e27 100644
42607 --- a/fs/cifs/cifsfs.c
42608 +++ b/fs/cifs/cifsfs.c
42609 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
42610 cifs_req_cachep = kmem_cache_create("cifs_request",
42611 CIFSMaxBufSize +
42612 MAX_CIFS_HDR_SIZE, 0,
42613 - SLAB_HWCACHE_ALIGN, NULL);
42614 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42615 if (cifs_req_cachep == NULL)
42616 return -ENOMEM;
42617
42618 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
42619 efficient to alloc 1 per page off the slab compared to 17K (5page)
42620 alloc of large cifs buffers even when page debugging is on */
42621 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42622 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42623 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42624 NULL);
42625 if (cifs_sm_req_cachep == NULL) {
42626 mempool_destroy(cifs_req_poolp);
42627 @@ -1101,8 +1101,8 @@ init_cifs(void)
42628 atomic_set(&bufAllocCount, 0);
42629 atomic_set(&smBufAllocCount, 0);
42630 #ifdef CONFIG_CIFS_STATS2
42631 - atomic_set(&totBufAllocCount, 0);
42632 - atomic_set(&totSmBufAllocCount, 0);
42633 + atomic_set_unchecked(&totBufAllocCount, 0);
42634 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42635 #endif /* CONFIG_CIFS_STATS2 */
42636
42637 atomic_set(&midCount, 0);
42638 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42639 index 8238aa1..0347196 100644
42640 --- a/fs/cifs/cifsglob.h
42641 +++ b/fs/cifs/cifsglob.h
42642 @@ -392,28 +392,28 @@ struct cifs_tcon {
42643 __u16 Flags; /* optional support bits */
42644 enum statusEnum tidStatus;
42645 #ifdef CONFIG_CIFS_STATS
42646 - atomic_t num_smbs_sent;
42647 - atomic_t num_writes;
42648 - atomic_t num_reads;
42649 - atomic_t num_flushes;
42650 - atomic_t num_oplock_brks;
42651 - atomic_t num_opens;
42652 - atomic_t num_closes;
42653 - atomic_t num_deletes;
42654 - atomic_t num_mkdirs;
42655 - atomic_t num_posixopens;
42656 - atomic_t num_posixmkdirs;
42657 - atomic_t num_rmdirs;
42658 - atomic_t num_renames;
42659 - atomic_t num_t2renames;
42660 - atomic_t num_ffirst;
42661 - atomic_t num_fnext;
42662 - atomic_t num_fclose;
42663 - atomic_t num_hardlinks;
42664 - atomic_t num_symlinks;
42665 - atomic_t num_locks;
42666 - atomic_t num_acl_get;
42667 - atomic_t num_acl_set;
42668 + atomic_unchecked_t num_smbs_sent;
42669 + atomic_unchecked_t num_writes;
42670 + atomic_unchecked_t num_reads;
42671 + atomic_unchecked_t num_flushes;
42672 + atomic_unchecked_t num_oplock_brks;
42673 + atomic_unchecked_t num_opens;
42674 + atomic_unchecked_t num_closes;
42675 + atomic_unchecked_t num_deletes;
42676 + atomic_unchecked_t num_mkdirs;
42677 + atomic_unchecked_t num_posixopens;
42678 + atomic_unchecked_t num_posixmkdirs;
42679 + atomic_unchecked_t num_rmdirs;
42680 + atomic_unchecked_t num_renames;
42681 + atomic_unchecked_t num_t2renames;
42682 + atomic_unchecked_t num_ffirst;
42683 + atomic_unchecked_t num_fnext;
42684 + atomic_unchecked_t num_fclose;
42685 + atomic_unchecked_t num_hardlinks;
42686 + atomic_unchecked_t num_symlinks;
42687 + atomic_unchecked_t num_locks;
42688 + atomic_unchecked_t num_acl_get;
42689 + atomic_unchecked_t num_acl_set;
42690 #ifdef CONFIG_CIFS_STATS2
42691 unsigned long long time_writes;
42692 unsigned long long time_reads;
42693 @@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
42694 }
42695
42696 #ifdef CONFIG_CIFS_STATS
42697 -#define cifs_stats_inc atomic_inc
42698 +#define cifs_stats_inc atomic_inc_unchecked
42699
42700 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42701 unsigned int bytes)
42702 @@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42703 /* Various Debug counters */
42704 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42705 #ifdef CONFIG_CIFS_STATS2
42706 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42707 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42708 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42709 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42710 #endif
42711 GLOBAL_EXTERN atomic_t smBufAllocCount;
42712 GLOBAL_EXTERN atomic_t midCount;
42713 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42714 index 6b0e064..94e6c3c 100644
42715 --- a/fs/cifs/link.c
42716 +++ b/fs/cifs/link.c
42717 @@ -600,7 +600,7 @@ symlink_exit:
42718
42719 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42720 {
42721 - char *p = nd_get_link(nd);
42722 + const char *p = nd_get_link(nd);
42723 if (!IS_ERR(p))
42724 kfree(p);
42725 }
42726 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
42727 index 703ef5c..2a44ed5 100644
42728 --- a/fs/cifs/misc.c
42729 +++ b/fs/cifs/misc.c
42730 @@ -156,7 +156,7 @@ cifs_buf_get(void)
42731 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
42732 atomic_inc(&bufAllocCount);
42733 #ifdef CONFIG_CIFS_STATS2
42734 - atomic_inc(&totBufAllocCount);
42735 + atomic_inc_unchecked(&totBufAllocCount);
42736 #endif /* CONFIG_CIFS_STATS2 */
42737 }
42738
42739 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
42740 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
42741 atomic_inc(&smBufAllocCount);
42742 #ifdef CONFIG_CIFS_STATS2
42743 - atomic_inc(&totSmBufAllocCount);
42744 + atomic_inc_unchecked(&totSmBufAllocCount);
42745 #endif /* CONFIG_CIFS_STATS2 */
42746
42747 }
42748 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
42749 index 6901578..d402eb5 100644
42750 --- a/fs/coda/cache.c
42751 +++ b/fs/coda/cache.c
42752 @@ -24,7 +24,7 @@
42753 #include "coda_linux.h"
42754 #include "coda_cache.h"
42755
42756 -static atomic_t permission_epoch = ATOMIC_INIT(0);
42757 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
42758
42759 /* replace or extend an acl cache hit */
42760 void coda_cache_enter(struct inode *inode, int mask)
42761 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
42762 struct coda_inode_info *cii = ITOC(inode);
42763
42764 spin_lock(&cii->c_lock);
42765 - cii->c_cached_epoch = atomic_read(&permission_epoch);
42766 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
42767 if (cii->c_uid != current_fsuid()) {
42768 cii->c_uid = current_fsuid();
42769 cii->c_cached_perm = mask;
42770 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
42771 {
42772 struct coda_inode_info *cii = ITOC(inode);
42773 spin_lock(&cii->c_lock);
42774 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
42775 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
42776 spin_unlock(&cii->c_lock);
42777 }
42778
42779 /* remove all acl caches */
42780 void coda_cache_clear_all(struct super_block *sb)
42781 {
42782 - atomic_inc(&permission_epoch);
42783 + atomic_inc_unchecked(&permission_epoch);
42784 }
42785
42786
42787 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
42788 spin_lock(&cii->c_lock);
42789 hit = (mask & cii->c_cached_perm) == mask &&
42790 cii->c_uid == current_fsuid() &&
42791 - cii->c_cached_epoch == atomic_read(&permission_epoch);
42792 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
42793 spin_unlock(&cii->c_lock);
42794
42795 return hit;
42796 diff --git a/fs/compat.c b/fs/compat.c
42797 index c987875..08771ca 100644
42798 --- a/fs/compat.c
42799 +++ b/fs/compat.c
42800 @@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
42801 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
42802 {
42803 compat_ino_t ino = stat->ino;
42804 - typeof(ubuf->st_uid) uid = 0;
42805 - typeof(ubuf->st_gid) gid = 0;
42806 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
42807 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
42808 int err;
42809
42810 SET_UID(uid, stat->uid);
42811 @@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
42812
42813 set_fs(KERNEL_DS);
42814 /* The __user pointer cast is valid because of the set_fs() */
42815 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
42816 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
42817 set_fs(oldfs);
42818 /* truncating is ok because it's a user address */
42819 if (!ret)
42820 @@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
42821 goto out;
42822
42823 ret = -EINVAL;
42824 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
42825 + if (nr_segs > UIO_MAXIOV)
42826 goto out;
42827 if (nr_segs > fast_segs) {
42828 ret = -ENOMEM;
42829 @@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
42830
42831 struct compat_readdir_callback {
42832 struct compat_old_linux_dirent __user *dirent;
42833 + struct file * file;
42834 int result;
42835 };
42836
42837 @@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
42838 buf->result = -EOVERFLOW;
42839 return -EOVERFLOW;
42840 }
42841 +
42842 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42843 + return 0;
42844 +
42845 buf->result++;
42846 dirent = buf->dirent;
42847 if (!access_ok(VERIFY_WRITE, dirent,
42848 @@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
42849
42850 buf.result = 0;
42851 buf.dirent = dirent;
42852 + buf.file = file;
42853
42854 error = vfs_readdir(file, compat_fillonedir, &buf);
42855 if (buf.result)
42856 @@ -914,6 +920,7 @@ struct compat_linux_dirent {
42857 struct compat_getdents_callback {
42858 struct compat_linux_dirent __user *current_dir;
42859 struct compat_linux_dirent __user *previous;
42860 + struct file * file;
42861 int count;
42862 int error;
42863 };
42864 @@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
42865 buf->error = -EOVERFLOW;
42866 return -EOVERFLOW;
42867 }
42868 +
42869 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42870 + return 0;
42871 +
42872 dirent = buf->previous;
42873 if (dirent) {
42874 if (__put_user(offset, &dirent->d_off))
42875 @@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
42876 buf.previous = NULL;
42877 buf.count = count;
42878 buf.error = 0;
42879 + buf.file = file;
42880
42881 error = vfs_readdir(file, compat_filldir, &buf);
42882 if (error >= 0)
42883 @@ -1003,6 +1015,7 @@ out:
42884 struct compat_getdents_callback64 {
42885 struct linux_dirent64 __user *current_dir;
42886 struct linux_dirent64 __user *previous;
42887 + struct file * file;
42888 int count;
42889 int error;
42890 };
42891 @@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
42892 buf->error = -EINVAL; /* only used if we fail.. */
42893 if (reclen > buf->count)
42894 return -EINVAL;
42895 +
42896 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42897 + return 0;
42898 +
42899 dirent = buf->previous;
42900
42901 if (dirent) {
42902 @@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
42903 buf.previous = NULL;
42904 buf.count = count;
42905 buf.error = 0;
42906 + buf.file = file;
42907
42908 error = vfs_readdir(file, compat_filldir64, &buf);
42909 if (error >= 0)
42910 error = buf.error;
42911 lastdirent = buf.previous;
42912 if (lastdirent) {
42913 - typeof(lastdirent->d_off) d_off = file->f_pos;
42914 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
42915 if (__put_user_unaligned(d_off, &lastdirent->d_off))
42916 error = -EFAULT;
42917 else
42918 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
42919 index 112e45a..b59845b 100644
42920 --- a/fs/compat_binfmt_elf.c
42921 +++ b/fs/compat_binfmt_elf.c
42922 @@ -30,11 +30,13 @@
42923 #undef elf_phdr
42924 #undef elf_shdr
42925 #undef elf_note
42926 +#undef elf_dyn
42927 #undef elf_addr_t
42928 #define elfhdr elf32_hdr
42929 #define elf_phdr elf32_phdr
42930 #define elf_shdr elf32_shdr
42931 #define elf_note elf32_note
42932 +#define elf_dyn Elf32_Dyn
42933 #define elf_addr_t Elf32_Addr
42934
42935 /*
42936 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
42937 index 51352de..93292ff 100644
42938 --- a/fs/compat_ioctl.c
42939 +++ b/fs/compat_ioctl.c
42940 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
42941
42942 err = get_user(palp, &up->palette);
42943 err |= get_user(length, &up->length);
42944 + if (err)
42945 + return -EFAULT;
42946
42947 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
42948 err = put_user(compat_ptr(palp), &up_native->palette);
42949 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
42950 return -EFAULT;
42951 if (__get_user(udata, &ss32->iomem_base))
42952 return -EFAULT;
42953 - ss.iomem_base = compat_ptr(udata);
42954 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
42955 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
42956 __get_user(ss.port_high, &ss32->port_high))
42957 return -EFAULT;
42958 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
42959 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
42960 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
42961 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
42962 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42963 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
42964 return -EFAULT;
42965
42966 return ioctl_preallocate(file, p);
42967 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
42968 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
42969 {
42970 unsigned int a, b;
42971 - a = *(unsigned int *)p;
42972 - b = *(unsigned int *)q;
42973 + a = *(const unsigned int *)p;
42974 + b = *(const unsigned int *)q;
42975 if (a > b)
42976 return 1;
42977 if (a < b)
42978 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
42979 index 9a37a9b..35792b6 100644
42980 --- a/fs/configfs/dir.c
42981 +++ b/fs/configfs/dir.c
42982 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42983 }
42984 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
42985 struct configfs_dirent *next;
42986 - const char * name;
42987 + const unsigned char * name;
42988 + char d_name[sizeof(next->s_dentry->d_iname)];
42989 int len;
42990 struct inode *inode = NULL;
42991
42992 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
42993 continue;
42994
42995 name = configfs_get_name(next);
42996 - len = strlen(name);
42997 + if (next->s_dentry && name == next->s_dentry->d_iname) {
42998 + len = next->s_dentry->d_name.len;
42999 + memcpy(d_name, name, len);
43000 + name = d_name;
43001 + } else
43002 + len = strlen(name);
43003
43004 /*
43005 * We'll have a dentry and an inode for
43006 diff --git a/fs/dcache.c b/fs/dcache.c
43007 index f7908ae..920a680 100644
43008 --- a/fs/dcache.c
43009 +++ b/fs/dcache.c
43010 @@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
43011 mempages -= reserve;
43012
43013 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43014 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43015 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43016
43017 dcache_init();
43018 inode_init();
43019 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43020 index f3a257d..715ac0f 100644
43021 --- a/fs/debugfs/inode.c
43022 +++ b/fs/debugfs/inode.c
43023 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43024 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43025 {
43026 return debugfs_create_file(name,
43027 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43028 + S_IFDIR | S_IRWXU,
43029 +#else
43030 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43031 +#endif
43032 parent, NULL, NULL);
43033 }
43034 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43035 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43036 index af11098..81e3bbe 100644
43037 --- a/fs/ecryptfs/inode.c
43038 +++ b/fs/ecryptfs/inode.c
43039 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43040 old_fs = get_fs();
43041 set_fs(get_ds());
43042 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43043 - (char __user *)lower_buf,
43044 + (char __force_user *)lower_buf,
43045 lower_bufsiz);
43046 set_fs(old_fs);
43047 if (rc < 0)
43048 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43049 }
43050 old_fs = get_fs();
43051 set_fs(get_ds());
43052 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43053 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43054 set_fs(old_fs);
43055 if (rc < 0) {
43056 kfree(buf);
43057 @@ -752,7 +752,7 @@ out:
43058 static void
43059 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43060 {
43061 - char *buf = nd_get_link(nd);
43062 + const char *buf = nd_get_link(nd);
43063 if (!IS_ERR(buf)) {
43064 /* Free the char* */
43065 kfree(buf);
43066 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43067 index 0dc5a3d..d3cdeea 100644
43068 --- a/fs/ecryptfs/miscdev.c
43069 +++ b/fs/ecryptfs/miscdev.c
43070 @@ -328,7 +328,7 @@ check_list:
43071 goto out_unlock_msg_ctx;
43072 i = 5;
43073 if (msg_ctx->msg) {
43074 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43075 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43076 goto out_unlock_msg_ctx;
43077 i += packet_length_size;
43078 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43079 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43080 index 608c1c3..7d040a8 100644
43081 --- a/fs/ecryptfs/read_write.c
43082 +++ b/fs/ecryptfs/read_write.c
43083 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43084 return -EIO;
43085 fs_save = get_fs();
43086 set_fs(get_ds());
43087 - rc = vfs_write(lower_file, data, size, &offset);
43088 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43089 set_fs(fs_save);
43090 mark_inode_dirty_sync(ecryptfs_inode);
43091 return rc;
43092 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43093 return -EIO;
43094 fs_save = get_fs();
43095 set_fs(get_ds());
43096 - rc = vfs_read(lower_file, data, size, &offset);
43097 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43098 set_fs(fs_save);
43099 return rc;
43100 }
43101 diff --git a/fs/exec.c b/fs/exec.c
43102 index 3625464..cdeecdb 100644
43103 --- a/fs/exec.c
43104 +++ b/fs/exec.c
43105 @@ -55,12 +55,28 @@
43106 #include <linux/pipe_fs_i.h>
43107 #include <linux/oom.h>
43108 #include <linux/compat.h>
43109 +#include <linux/random.h>
43110 +#include <linux/seq_file.h>
43111 +
43112 +#ifdef CONFIG_PAX_REFCOUNT
43113 +#include <linux/kallsyms.h>
43114 +#include <linux/kdebug.h>
43115 +#endif
43116
43117 #include <asm/uaccess.h>
43118 #include <asm/mmu_context.h>
43119 #include <asm/tlb.h>
43120 #include "internal.h"
43121
43122 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
43123 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
43124 +#endif
43125 +
43126 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43127 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43128 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43129 +#endif
43130 +
43131 int core_uses_pid;
43132 char core_pattern[CORENAME_MAX_SIZE] = "core";
43133 unsigned int core_pipe_limit;
43134 @@ -70,7 +86,7 @@ struct core_name {
43135 char *corename;
43136 int used, size;
43137 };
43138 -static atomic_t call_count = ATOMIC_INIT(1);
43139 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43140
43141 /* The maximal length of core_pattern is also specified in sysctl.c */
43142
43143 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43144 int write)
43145 {
43146 struct page *page;
43147 - int ret;
43148
43149 -#ifdef CONFIG_STACK_GROWSUP
43150 - if (write) {
43151 - ret = expand_downwards(bprm->vma, pos);
43152 - if (ret < 0)
43153 - return NULL;
43154 - }
43155 -#endif
43156 - ret = get_user_pages(current, bprm->mm, pos,
43157 - 1, write, 1, &page, NULL);
43158 - if (ret <= 0)
43159 + if (0 > expand_downwards(bprm->vma, pos))
43160 + return NULL;
43161 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43162 return NULL;
43163
43164 if (write) {
43165 @@ -215,6 +223,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43166 if (size <= ARG_MAX)
43167 return page;
43168
43169 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43170 + // only allow 512KB for argv+env on suid/sgid binaries
43171 + // to prevent easy ASLR exhaustion
43172 + if (((bprm->cred->euid != current_euid()) ||
43173 + (bprm->cred->egid != current_egid())) &&
43174 + (size > (512 * 1024))) {
43175 + put_page(page);
43176 + return NULL;
43177 + }
43178 +#endif
43179 +
43180 /*
43181 * Limit to 1/4-th the stack size for the argv+env strings.
43182 * This ensures that:
43183 @@ -274,6 +293,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43184 vma->vm_end = STACK_TOP_MAX;
43185 vma->vm_start = vma->vm_end - PAGE_SIZE;
43186 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43187 +
43188 +#ifdef CONFIG_PAX_SEGMEXEC
43189 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43190 +#endif
43191 +
43192 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43193 INIT_LIST_HEAD(&vma->anon_vma_chain);
43194
43195 @@ -288,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43196 mm->stack_vm = mm->total_vm = 1;
43197 up_write(&mm->mmap_sem);
43198 bprm->p = vma->vm_end - sizeof(void *);
43199 +
43200 +#ifdef CONFIG_PAX_RANDUSTACK
43201 + if (randomize_va_space)
43202 + bprm->p ^= random32() & ~PAGE_MASK;
43203 +#endif
43204 +
43205 return 0;
43206 err:
43207 up_write(&mm->mmap_sem);
43208 @@ -396,19 +426,7 @@ err:
43209 return err;
43210 }
43211
43212 -struct user_arg_ptr {
43213 -#ifdef CONFIG_COMPAT
43214 - bool is_compat;
43215 -#endif
43216 - union {
43217 - const char __user *const __user *native;
43218 -#ifdef CONFIG_COMPAT
43219 - compat_uptr_t __user *compat;
43220 -#endif
43221 - } ptr;
43222 -};
43223 -
43224 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43225 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43226 {
43227 const char __user *native;
43228
43229 @@ -417,14 +435,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43230 compat_uptr_t compat;
43231
43232 if (get_user(compat, argv.ptr.compat + nr))
43233 - return ERR_PTR(-EFAULT);
43234 + return (const char __force_user *)ERR_PTR(-EFAULT);
43235
43236 return compat_ptr(compat);
43237 }
43238 #endif
43239
43240 if (get_user(native, argv.ptr.native + nr))
43241 - return ERR_PTR(-EFAULT);
43242 + return (const char __force_user *)ERR_PTR(-EFAULT);
43243
43244 return native;
43245 }
43246 @@ -443,7 +461,7 @@ static int count(struct user_arg_ptr argv, int max)
43247 if (!p)
43248 break;
43249
43250 - if (IS_ERR(p))
43251 + if (IS_ERR((const char __force_kernel *)p))
43252 return -EFAULT;
43253
43254 if (i++ >= max)
43255 @@ -477,7 +495,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43256
43257 ret = -EFAULT;
43258 str = get_user_arg_ptr(argv, argc);
43259 - if (IS_ERR(str))
43260 + if (IS_ERR((const char __force_kernel *)str))
43261 goto out;
43262
43263 len = strnlen_user(str, MAX_ARG_STRLEN);
43264 @@ -559,7 +577,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43265 int r;
43266 mm_segment_t oldfs = get_fs();
43267 struct user_arg_ptr argv = {
43268 - .ptr.native = (const char __user *const __user *)__argv,
43269 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43270 };
43271
43272 set_fs(KERNEL_DS);
43273 @@ -594,7 +612,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43274 unsigned long new_end = old_end - shift;
43275 struct mmu_gather tlb;
43276
43277 - BUG_ON(new_start > new_end);
43278 + if (new_start >= new_end || new_start < mmap_min_addr)
43279 + return -ENOMEM;
43280
43281 /*
43282 * ensure there are no vmas between where we want to go
43283 @@ -603,6 +622,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43284 if (vma != find_vma(mm, new_start))
43285 return -EFAULT;
43286
43287 +#ifdef CONFIG_PAX_SEGMEXEC
43288 + BUG_ON(pax_find_mirror_vma(vma));
43289 +#endif
43290 +
43291 /*
43292 * cover the whole range: [new_start, old_end)
43293 */
43294 @@ -683,10 +706,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43295 stack_top = arch_align_stack(stack_top);
43296 stack_top = PAGE_ALIGN(stack_top);
43297
43298 - if (unlikely(stack_top < mmap_min_addr) ||
43299 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43300 - return -ENOMEM;
43301 -
43302 stack_shift = vma->vm_end - stack_top;
43303
43304 bprm->p -= stack_shift;
43305 @@ -698,8 +717,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43306 bprm->exec -= stack_shift;
43307
43308 down_write(&mm->mmap_sem);
43309 +
43310 + /* Move stack pages down in memory. */
43311 + if (stack_shift) {
43312 + ret = shift_arg_pages(vma, stack_shift);
43313 + if (ret)
43314 + goto out_unlock;
43315 + }
43316 +
43317 vm_flags = VM_STACK_FLAGS;
43318
43319 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43320 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43321 + vm_flags &= ~VM_EXEC;
43322 +
43323 +#ifdef CONFIG_PAX_MPROTECT
43324 + if (mm->pax_flags & MF_PAX_MPROTECT)
43325 + vm_flags &= ~VM_MAYEXEC;
43326 +#endif
43327 +
43328 + }
43329 +#endif
43330 +
43331 /*
43332 * Adjust stack execute permissions; explicitly enable for
43333 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43334 @@ -718,13 +757,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43335 goto out_unlock;
43336 BUG_ON(prev != vma);
43337
43338 - /* Move stack pages down in memory. */
43339 - if (stack_shift) {
43340 - ret = shift_arg_pages(vma, stack_shift);
43341 - if (ret)
43342 - goto out_unlock;
43343 - }
43344 -
43345 /* mprotect_fixup is overkill to remove the temporary stack flags */
43346 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43347
43348 @@ -805,7 +837,7 @@ int kernel_read(struct file *file, loff_t offset,
43349 old_fs = get_fs();
43350 set_fs(get_ds());
43351 /* The cast to a user pointer is valid due to the set_fs() */
43352 - result = vfs_read(file, (void __user *)addr, count, &pos);
43353 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43354 set_fs(old_fs);
43355 return result;
43356 }
43357 @@ -1067,6 +1099,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
43358 perf_event_comm(tsk);
43359 }
43360
43361 +static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
43362 +{
43363 + int i, ch;
43364 +
43365 + /* Copies the binary name from after last slash */
43366 + for (i = 0; (ch = *(fn++)) != '\0';) {
43367 + if (ch == '/')
43368 + i = 0; /* overwrite what we wrote */
43369 + else
43370 + if (i < len - 1)
43371 + tcomm[i++] = ch;
43372 + }
43373 + tcomm[i] = '\0';
43374 +}
43375 +
43376 int flush_old_exec(struct linux_binprm * bprm)
43377 {
43378 int retval;
43379 @@ -1081,6 +1128,7 @@ int flush_old_exec(struct linux_binprm * bprm)
43380
43381 set_mm_exe_file(bprm->mm, bprm->file);
43382
43383 + filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
43384 /*
43385 * Release all of the old mmap stuff
43386 */
43387 @@ -1112,10 +1160,6 @@ EXPORT_SYMBOL(would_dump);
43388
43389 void setup_new_exec(struct linux_binprm * bprm)
43390 {
43391 - int i, ch;
43392 - const char *name;
43393 - char tcomm[sizeof(current->comm)];
43394 -
43395 arch_pick_mmap_layout(current->mm);
43396
43397 /* This is the point of no return */
43398 @@ -1126,18 +1170,7 @@ void setup_new_exec(struct linux_binprm * bprm)
43399 else
43400 set_dumpable(current->mm, suid_dumpable);
43401
43402 - name = bprm->filename;
43403 -
43404 - /* Copies the binary name from after last slash */
43405 - for (i=0; (ch = *(name++)) != '\0';) {
43406 - if (ch == '/')
43407 - i = 0; /* overwrite what we wrote */
43408 - else
43409 - if (i < (sizeof(tcomm) - 1))
43410 - tcomm[i++] = ch;
43411 - }
43412 - tcomm[i] = '\0';
43413 - set_task_comm(current, tcomm);
43414 + set_task_comm(current, bprm->tcomm);
43415
43416 /* Set the new mm task size. We have to do that late because it may
43417 * depend on TIF_32BIT which is only updated in flush_thread() on
43418 @@ -1247,7 +1280,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
43419 }
43420 rcu_read_unlock();
43421
43422 - if (p->fs->users > n_fs) {
43423 + if (atomic_read(&p->fs->users) > n_fs) {
43424 bprm->unsafe |= LSM_UNSAFE_SHARE;
43425 } else {
43426 res = -EAGAIN;
43427 @@ -1442,6 +1475,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43428
43429 EXPORT_SYMBOL(search_binary_handler);
43430
43431 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43432 +static DEFINE_PER_CPU(u64, exec_counter);
43433 +static int __init init_exec_counters(void)
43434 +{
43435 + unsigned int cpu;
43436 +
43437 + for_each_possible_cpu(cpu) {
43438 + per_cpu(exec_counter, cpu) = (u64)cpu;
43439 + }
43440 +
43441 + return 0;
43442 +}
43443 +early_initcall(init_exec_counters);
43444 +static inline void increment_exec_counter(void)
43445 +{
43446 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
43447 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
43448 +}
43449 +#else
43450 +static inline void increment_exec_counter(void) {}
43451 +#endif
43452 +
43453 /*
43454 * sys_execve() executes a new program.
43455 */
43456 @@ -1450,6 +1505,11 @@ static int do_execve_common(const char *filename,
43457 struct user_arg_ptr envp,
43458 struct pt_regs *regs)
43459 {
43460 +#ifdef CONFIG_GRKERNSEC
43461 + struct file *old_exec_file;
43462 + struct acl_subject_label *old_acl;
43463 + struct rlimit old_rlim[RLIM_NLIMITS];
43464 +#endif
43465 struct linux_binprm *bprm;
43466 struct file *file;
43467 struct files_struct *displaced;
43468 @@ -1457,6 +1517,8 @@ static int do_execve_common(const char *filename,
43469 int retval;
43470 const struct cred *cred = current_cred();
43471
43472 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43473 +
43474 /*
43475 * We move the actual failure in case of RLIMIT_NPROC excess from
43476 * set*uid() to execve() because too many poorly written programs
43477 @@ -1497,12 +1559,27 @@ static int do_execve_common(const char *filename,
43478 if (IS_ERR(file))
43479 goto out_unmark;
43480
43481 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
43482 + retval = -EPERM;
43483 + goto out_file;
43484 + }
43485 +
43486 sched_exec();
43487
43488 bprm->file = file;
43489 bprm->filename = filename;
43490 bprm->interp = filename;
43491
43492 + if (gr_process_user_ban()) {
43493 + retval = -EPERM;
43494 + goto out_file;
43495 + }
43496 +
43497 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43498 + retval = -EACCES;
43499 + goto out_file;
43500 + }
43501 +
43502 retval = bprm_mm_init(bprm);
43503 if (retval)
43504 goto out_file;
43505 @@ -1519,24 +1596,65 @@ static int do_execve_common(const char *filename,
43506 if (retval < 0)
43507 goto out;
43508
43509 +#ifdef CONFIG_GRKERNSEC
43510 + old_acl = current->acl;
43511 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43512 + old_exec_file = current->exec_file;
43513 + get_file(file);
43514 + current->exec_file = file;
43515 +#endif
43516 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43517 + /* limit suid stack to 8MB
43518 + we saved the old limits above and will restore them if this exec fails
43519 + */
43520 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
43521 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
43522 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
43523 +#endif
43524 +
43525 + if (!gr_tpe_allow(file)) {
43526 + retval = -EACCES;
43527 + goto out_fail;
43528 + }
43529 +
43530 + if (gr_check_crash_exec(file)) {
43531 + retval = -EACCES;
43532 + goto out_fail;
43533 + }
43534 +
43535 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43536 + bprm->unsafe);
43537 + if (retval < 0)
43538 + goto out_fail;
43539 +
43540 retval = copy_strings_kernel(1, &bprm->filename, bprm);
43541 if (retval < 0)
43542 - goto out;
43543 + goto out_fail;
43544
43545 bprm->exec = bprm->p;
43546 retval = copy_strings(bprm->envc, envp, bprm);
43547 if (retval < 0)
43548 - goto out;
43549 + goto out_fail;
43550
43551 retval = copy_strings(bprm->argc, argv, bprm);
43552 if (retval < 0)
43553 - goto out;
43554 + goto out_fail;
43555 +
43556 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43557 +
43558 + gr_handle_exec_args(bprm, argv);
43559
43560 retval = search_binary_handler(bprm,regs);
43561 if (retval < 0)
43562 - goto out;
43563 + goto out_fail;
43564 +#ifdef CONFIG_GRKERNSEC
43565 + if (old_exec_file)
43566 + fput(old_exec_file);
43567 +#endif
43568
43569 /* execve succeeded */
43570 +
43571 + increment_exec_counter();
43572 current->fs->in_exec = 0;
43573 current->in_execve = 0;
43574 acct_update_integrals(current);
43575 @@ -1545,6 +1663,14 @@ static int do_execve_common(const char *filename,
43576 put_files_struct(displaced);
43577 return retval;
43578
43579 +out_fail:
43580 +#ifdef CONFIG_GRKERNSEC
43581 + current->acl = old_acl;
43582 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43583 + fput(current->exec_file);
43584 + current->exec_file = old_exec_file;
43585 +#endif
43586 +
43587 out:
43588 if (bprm->mm) {
43589 acct_arg_size(bprm, 0);
43590 @@ -1618,7 +1744,7 @@ static int expand_corename(struct core_name *cn)
43591 {
43592 char *old_corename = cn->corename;
43593
43594 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43595 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43596 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43597
43598 if (!cn->corename) {
43599 @@ -1715,7 +1841,7 @@ static int format_corename(struct core_name *cn, long signr)
43600 int pid_in_pattern = 0;
43601 int err = 0;
43602
43603 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43604 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43605 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43606 cn->used = 0;
43607
43608 @@ -1812,6 +1938,218 @@ out:
43609 return ispipe;
43610 }
43611
43612 +int pax_check_flags(unsigned long *flags)
43613 +{
43614 + int retval = 0;
43615 +
43616 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43617 + if (*flags & MF_PAX_SEGMEXEC)
43618 + {
43619 + *flags &= ~MF_PAX_SEGMEXEC;
43620 + retval = -EINVAL;
43621 + }
43622 +#endif
43623 +
43624 + if ((*flags & MF_PAX_PAGEEXEC)
43625 +
43626 +#ifdef CONFIG_PAX_PAGEEXEC
43627 + && (*flags & MF_PAX_SEGMEXEC)
43628 +#endif
43629 +
43630 + )
43631 + {
43632 + *flags &= ~MF_PAX_PAGEEXEC;
43633 + retval = -EINVAL;
43634 + }
43635 +
43636 + if ((*flags & MF_PAX_MPROTECT)
43637 +
43638 +#ifdef CONFIG_PAX_MPROTECT
43639 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43640 +#endif
43641 +
43642 + )
43643 + {
43644 + *flags &= ~MF_PAX_MPROTECT;
43645 + retval = -EINVAL;
43646 + }
43647 +
43648 + if ((*flags & MF_PAX_EMUTRAMP)
43649 +
43650 +#ifdef CONFIG_PAX_EMUTRAMP
43651 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43652 +#endif
43653 +
43654 + )
43655 + {
43656 + *flags &= ~MF_PAX_EMUTRAMP;
43657 + retval = -EINVAL;
43658 + }
43659 +
43660 + return retval;
43661 +}
43662 +
43663 +EXPORT_SYMBOL(pax_check_flags);
43664 +
43665 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43666 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43667 +{
43668 + struct task_struct *tsk = current;
43669 + struct mm_struct *mm = current->mm;
43670 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43671 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43672 + char *path_exec = NULL;
43673 + char *path_fault = NULL;
43674 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43675 +
43676 + if (buffer_exec && buffer_fault) {
43677 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43678 +
43679 + down_read(&mm->mmap_sem);
43680 + vma = mm->mmap;
43681 + while (vma && (!vma_exec || !vma_fault)) {
43682 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43683 + vma_exec = vma;
43684 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43685 + vma_fault = vma;
43686 + vma = vma->vm_next;
43687 + }
43688 + if (vma_exec) {
43689 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43690 + if (IS_ERR(path_exec))
43691 + path_exec = "<path too long>";
43692 + else {
43693 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43694 + if (path_exec) {
43695 + *path_exec = 0;
43696 + path_exec = buffer_exec;
43697 + } else
43698 + path_exec = "<path too long>";
43699 + }
43700 + }
43701 + if (vma_fault) {
43702 + start = vma_fault->vm_start;
43703 + end = vma_fault->vm_end;
43704 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43705 + if (vma_fault->vm_file) {
43706 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43707 + if (IS_ERR(path_fault))
43708 + path_fault = "<path too long>";
43709 + else {
43710 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43711 + if (path_fault) {
43712 + *path_fault = 0;
43713 + path_fault = buffer_fault;
43714 + } else
43715 + path_fault = "<path too long>";
43716 + }
43717 + } else
43718 + path_fault = "<anonymous mapping>";
43719 + }
43720 + up_read(&mm->mmap_sem);
43721 + }
43722 + if (tsk->signal->curr_ip)
43723 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43724 + else
43725 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43726 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43727 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43728 + task_uid(tsk), task_euid(tsk), pc, sp);
43729 + free_page((unsigned long)buffer_exec);
43730 + free_page((unsigned long)buffer_fault);
43731 + pax_report_insns(regs, pc, sp);
43732 + do_coredump(SIGKILL, SIGKILL, regs);
43733 +}
43734 +#endif
43735 +
43736 +#ifdef CONFIG_PAX_REFCOUNT
43737 +void pax_report_refcount_overflow(struct pt_regs *regs)
43738 +{
43739 + if (current->signal->curr_ip)
43740 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43741 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43742 + else
43743 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43744 + current->comm, task_pid_nr(current), current_uid(), current_euid());
43745 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43746 + show_regs(regs);
43747 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43748 +}
43749 +#endif
43750 +
43751 +#ifdef CONFIG_PAX_USERCOPY
43752 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43753 +int object_is_on_stack(const void *obj, unsigned long len)
43754 +{
43755 + const void * const stack = task_stack_page(current);
43756 + const void * const stackend = stack + THREAD_SIZE;
43757 +
43758 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43759 + const void *frame = NULL;
43760 + const void *oldframe;
43761 +#endif
43762 +
43763 + if (obj + len < obj)
43764 + return -1;
43765 +
43766 + if (obj + len <= stack || stackend <= obj)
43767 + return 0;
43768 +
43769 + if (obj < stack || stackend < obj + len)
43770 + return -1;
43771 +
43772 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43773 + oldframe = __builtin_frame_address(1);
43774 + if (oldframe)
43775 + frame = __builtin_frame_address(2);
43776 + /*
43777 + low ----------------------------------------------> high
43778 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
43779 + ^----------------^
43780 + allow copies only within here
43781 + */
43782 + while (stack <= frame && frame < stackend) {
43783 + /* if obj + len extends past the last frame, this
43784 + check won't pass and the next frame will be 0,
43785 + causing us to bail out and correctly report
43786 + the copy as invalid
43787 + */
43788 + if (obj + len <= frame)
43789 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43790 + oldframe = frame;
43791 + frame = *(const void * const *)frame;
43792 + }
43793 + return -1;
43794 +#else
43795 + return 1;
43796 +#endif
43797 +}
43798 +
43799 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
43800 +{
43801 + if (current->signal->curr_ip)
43802 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43803 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43804 + else
43805 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43806 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43807 + dump_stack();
43808 + gr_handle_kernel_exploit();
43809 + do_group_exit(SIGKILL);
43810 +}
43811 +#endif
43812 +
43813 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
43814 +void pax_track_stack(void)
43815 +{
43816 + unsigned long sp = (unsigned long)&sp;
43817 + if (sp < current_thread_info()->lowest_stack &&
43818 + sp > (unsigned long)task_stack_page(current))
43819 + current_thread_info()->lowest_stack = sp;
43820 +}
43821 +EXPORT_SYMBOL(pax_track_stack);
43822 +#endif
43823 +
43824 static int zap_process(struct task_struct *start, int exit_code)
43825 {
43826 struct task_struct *t;
43827 @@ -2023,17 +2361,17 @@ static void wait_for_dump_helpers(struct file *file)
43828 pipe = file->f_path.dentry->d_inode->i_pipe;
43829
43830 pipe_lock(pipe);
43831 - pipe->readers++;
43832 - pipe->writers--;
43833 + atomic_inc(&pipe->readers);
43834 + atomic_dec(&pipe->writers);
43835
43836 - while ((pipe->readers > 1) && (!signal_pending(current))) {
43837 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
43838 wake_up_interruptible_sync(&pipe->wait);
43839 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
43840 pipe_wait(pipe);
43841 }
43842
43843 - pipe->readers--;
43844 - pipe->writers++;
43845 + atomic_dec(&pipe->readers);
43846 + atomic_inc(&pipe->writers);
43847 pipe_unlock(pipe);
43848
43849 }
43850 @@ -2094,7 +2432,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43851 int retval = 0;
43852 int flag = 0;
43853 int ispipe;
43854 - static atomic_t core_dump_count = ATOMIC_INIT(0);
43855 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
43856 struct coredump_params cprm = {
43857 .signr = signr,
43858 .regs = regs,
43859 @@ -2109,6 +2447,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43860
43861 audit_core_dumps(signr);
43862
43863 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
43864 + gr_handle_brute_attach(current, cprm.mm_flags);
43865 +
43866 binfmt = mm->binfmt;
43867 if (!binfmt || !binfmt->core_dump)
43868 goto fail;
43869 @@ -2176,7 +2517,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43870 }
43871 cprm.limit = RLIM_INFINITY;
43872
43873 - dump_count = atomic_inc_return(&core_dump_count);
43874 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
43875 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
43876 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
43877 task_tgid_vnr(current), current->comm);
43878 @@ -2203,6 +2544,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43879 } else {
43880 struct inode *inode;
43881
43882 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
43883 +
43884 if (cprm.limit < binfmt->min_coredump)
43885 goto fail_unlock;
43886
43887 @@ -2246,7 +2589,7 @@ close_fail:
43888 filp_close(cprm.file, NULL);
43889 fail_dropcount:
43890 if (ispipe)
43891 - atomic_dec(&core_dump_count);
43892 + atomic_dec_unchecked(&core_dump_count);
43893 fail_unlock:
43894 kfree(cn.corename);
43895 fail_corename:
43896 @@ -2265,7 +2608,7 @@ fail:
43897 */
43898 int dump_write(struct file *file, const void *addr, int nr)
43899 {
43900 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
43901 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
43902 }
43903 EXPORT_SYMBOL(dump_write);
43904
43905 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
43906 index a8cbe1b..fed04cb 100644
43907 --- a/fs/ext2/balloc.c
43908 +++ b/fs/ext2/balloc.c
43909 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
43910
43911 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43912 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43913 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43914 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
43915 sbi->s_resuid != current_fsuid() &&
43916 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43917 return 0;
43918 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
43919 index a203892..4e64db5 100644
43920 --- a/fs/ext3/balloc.c
43921 +++ b/fs/ext3/balloc.c
43922 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
43923
43924 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
43925 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
43926 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
43927 + if (free_blocks < root_blocks + 1 &&
43928 !use_reservation && sbi->s_resuid != current_fsuid() &&
43929 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
43930 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
43931 + !capable_nolog(CAP_SYS_RESOURCE)) {
43932 return 0;
43933 }
43934 return 1;
43935 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
43936 index 12ccacd..a6035fce0 100644
43937 --- a/fs/ext4/balloc.c
43938 +++ b/fs/ext4/balloc.c
43939 @@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
43940 /* Hm, nope. Are (enough) root reserved clusters available? */
43941 if (sbi->s_resuid == current_fsuid() ||
43942 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
43943 - capable(CAP_SYS_RESOURCE) ||
43944 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
43945 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
43946 + capable_nolog(CAP_SYS_RESOURCE)) {
43947
43948 if (free_clusters >= (nclusters + dirty_clusters))
43949 return 1;
43950 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
43951 index 5b0e26a..0aa002d 100644
43952 --- a/fs/ext4/ext4.h
43953 +++ b/fs/ext4/ext4.h
43954 @@ -1208,19 +1208,19 @@ struct ext4_sb_info {
43955 unsigned long s_mb_last_start;
43956
43957 /* stats for buddy allocator */
43958 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
43959 - atomic_t s_bal_success; /* we found long enough chunks */
43960 - atomic_t s_bal_allocated; /* in blocks */
43961 - atomic_t s_bal_ex_scanned; /* total extents scanned */
43962 - atomic_t s_bal_goals; /* goal hits */
43963 - atomic_t s_bal_breaks; /* too long searches */
43964 - atomic_t s_bal_2orders; /* 2^order hits */
43965 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
43966 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
43967 + atomic_unchecked_t s_bal_allocated; /* in blocks */
43968 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
43969 + atomic_unchecked_t s_bal_goals; /* goal hits */
43970 + atomic_unchecked_t s_bal_breaks; /* too long searches */
43971 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
43972 spinlock_t s_bal_lock;
43973 unsigned long s_mb_buddies_generated;
43974 unsigned long long s_mb_generation_time;
43975 - atomic_t s_mb_lost_chunks;
43976 - atomic_t s_mb_preallocated;
43977 - atomic_t s_mb_discarded;
43978 + atomic_unchecked_t s_mb_lost_chunks;
43979 + atomic_unchecked_t s_mb_preallocated;
43980 + atomic_unchecked_t s_mb_discarded;
43981 atomic_t s_lock_busy;
43982
43983 /* locality groups */
43984 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
43985 index e2d8be8..c7f0ce9 100644
43986 --- a/fs/ext4/mballoc.c
43987 +++ b/fs/ext4/mballoc.c
43988 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
43989 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
43990
43991 if (EXT4_SB(sb)->s_mb_stats)
43992 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
43993 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
43994
43995 break;
43996 }
43997 @@ -2088,7 +2088,7 @@ repeat:
43998 ac->ac_status = AC_STATUS_CONTINUE;
43999 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44000 cr = 3;
44001 - atomic_inc(&sbi->s_mb_lost_chunks);
44002 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44003 goto repeat;
44004 }
44005 }
44006 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
44007 if (sbi->s_mb_stats) {
44008 ext4_msg(sb, KERN_INFO,
44009 "mballoc: %u blocks %u reqs (%u success)",
44010 - atomic_read(&sbi->s_bal_allocated),
44011 - atomic_read(&sbi->s_bal_reqs),
44012 - atomic_read(&sbi->s_bal_success));
44013 + atomic_read_unchecked(&sbi->s_bal_allocated),
44014 + atomic_read_unchecked(&sbi->s_bal_reqs),
44015 + atomic_read_unchecked(&sbi->s_bal_success));
44016 ext4_msg(sb, KERN_INFO,
44017 "mballoc: %u extents scanned, %u goal hits, "
44018 "%u 2^N hits, %u breaks, %u lost",
44019 - atomic_read(&sbi->s_bal_ex_scanned),
44020 - atomic_read(&sbi->s_bal_goals),
44021 - atomic_read(&sbi->s_bal_2orders),
44022 - atomic_read(&sbi->s_bal_breaks),
44023 - atomic_read(&sbi->s_mb_lost_chunks));
44024 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44025 + atomic_read_unchecked(&sbi->s_bal_goals),
44026 + atomic_read_unchecked(&sbi->s_bal_2orders),
44027 + atomic_read_unchecked(&sbi->s_bal_breaks),
44028 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44029 ext4_msg(sb, KERN_INFO,
44030 "mballoc: %lu generated and it took %Lu",
44031 sbi->s_mb_buddies_generated,
44032 sbi->s_mb_generation_time);
44033 ext4_msg(sb, KERN_INFO,
44034 "mballoc: %u preallocated, %u discarded",
44035 - atomic_read(&sbi->s_mb_preallocated),
44036 - atomic_read(&sbi->s_mb_discarded));
44037 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44038 + atomic_read_unchecked(&sbi->s_mb_discarded));
44039 }
44040
44041 free_percpu(sbi->s_locality_groups);
44042 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44043 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44044
44045 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44046 - atomic_inc(&sbi->s_bal_reqs);
44047 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44048 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44049 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44050 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44051 - atomic_inc(&sbi->s_bal_success);
44052 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44053 + atomic_inc_unchecked(&sbi->s_bal_success);
44054 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44055 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44056 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44057 - atomic_inc(&sbi->s_bal_goals);
44058 + atomic_inc_unchecked(&sbi->s_bal_goals);
44059 if (ac->ac_found > sbi->s_mb_max_to_scan)
44060 - atomic_inc(&sbi->s_bal_breaks);
44061 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44062 }
44063
44064 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44065 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44066 trace_ext4_mb_new_inode_pa(ac, pa);
44067
44068 ext4_mb_use_inode_pa(ac, pa);
44069 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44070 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44071
44072 ei = EXT4_I(ac->ac_inode);
44073 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44074 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44075 trace_ext4_mb_new_group_pa(ac, pa);
44076
44077 ext4_mb_use_group_pa(ac, pa);
44078 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44079 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44080
44081 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44082 lg = ac->ac_lg;
44083 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44084 * from the bitmap and continue.
44085 */
44086 }
44087 - atomic_add(free, &sbi->s_mb_discarded);
44088 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44089
44090 return err;
44091 }
44092 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44093 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44094 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44095 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44096 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44097 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44098 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44099
44100 return 0;
44101 diff --git a/fs/fcntl.c b/fs/fcntl.c
44102 index 22764c7..86372c9 100644
44103 --- a/fs/fcntl.c
44104 +++ b/fs/fcntl.c
44105 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44106 if (err)
44107 return err;
44108
44109 + if (gr_handle_chroot_fowner(pid, type))
44110 + return -ENOENT;
44111 + if (gr_check_protected_task_fowner(pid, type))
44112 + return -EACCES;
44113 +
44114 f_modown(filp, pid, type, force);
44115 return 0;
44116 }
44117 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44118
44119 static int f_setown_ex(struct file *filp, unsigned long arg)
44120 {
44121 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44122 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44123 struct f_owner_ex owner;
44124 struct pid *pid;
44125 int type;
44126 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44127
44128 static int f_getown_ex(struct file *filp, unsigned long arg)
44129 {
44130 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44131 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44132 struct f_owner_ex owner;
44133 int ret = 0;
44134
44135 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44136 switch (cmd) {
44137 case F_DUPFD:
44138 case F_DUPFD_CLOEXEC:
44139 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44140 if (arg >= rlimit(RLIMIT_NOFILE))
44141 break;
44142 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44143 diff --git a/fs/fifo.c b/fs/fifo.c
44144 index b1a524d..4ee270e 100644
44145 --- a/fs/fifo.c
44146 +++ b/fs/fifo.c
44147 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44148 */
44149 filp->f_op = &read_pipefifo_fops;
44150 pipe->r_counter++;
44151 - if (pipe->readers++ == 0)
44152 + if (atomic_inc_return(&pipe->readers) == 1)
44153 wake_up_partner(inode);
44154
44155 - if (!pipe->writers) {
44156 + if (!atomic_read(&pipe->writers)) {
44157 if ((filp->f_flags & O_NONBLOCK)) {
44158 /* suppress POLLHUP until we have
44159 * seen a writer */
44160 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44161 * errno=ENXIO when there is no process reading the FIFO.
44162 */
44163 ret = -ENXIO;
44164 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44165 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44166 goto err;
44167
44168 filp->f_op = &write_pipefifo_fops;
44169 pipe->w_counter++;
44170 - if (!pipe->writers++)
44171 + if (atomic_inc_return(&pipe->writers) == 1)
44172 wake_up_partner(inode);
44173
44174 - if (!pipe->readers) {
44175 + if (!atomic_read(&pipe->readers)) {
44176 wait_for_partner(inode, &pipe->r_counter);
44177 if (signal_pending(current))
44178 goto err_wr;
44179 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44180 */
44181 filp->f_op = &rdwr_pipefifo_fops;
44182
44183 - pipe->readers++;
44184 - pipe->writers++;
44185 + atomic_inc(&pipe->readers);
44186 + atomic_inc(&pipe->writers);
44187 pipe->r_counter++;
44188 pipe->w_counter++;
44189 - if (pipe->readers == 1 || pipe->writers == 1)
44190 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44191 wake_up_partner(inode);
44192 break;
44193
44194 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44195 return 0;
44196
44197 err_rd:
44198 - if (!--pipe->readers)
44199 + if (atomic_dec_and_test(&pipe->readers))
44200 wake_up_interruptible(&pipe->wait);
44201 ret = -ERESTARTSYS;
44202 goto err;
44203
44204 err_wr:
44205 - if (!--pipe->writers)
44206 + if (atomic_dec_and_test(&pipe->writers))
44207 wake_up_interruptible(&pipe->wait);
44208 ret = -ERESTARTSYS;
44209 goto err;
44210
44211 err:
44212 - if (!pipe->readers && !pipe->writers)
44213 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44214 free_pipe_info(inode);
44215
44216 err_nocleanup:
44217 diff --git a/fs/file.c b/fs/file.c
44218 index 4c6992d..104cdea 100644
44219 --- a/fs/file.c
44220 +++ b/fs/file.c
44221 @@ -15,6 +15,7 @@
44222 #include <linux/slab.h>
44223 #include <linux/vmalloc.h>
44224 #include <linux/file.h>
44225 +#include <linux/security.h>
44226 #include <linux/fdtable.h>
44227 #include <linux/bitops.h>
44228 #include <linux/interrupt.h>
44229 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44230 * N.B. For clone tasks sharing a files structure, this test
44231 * will limit the total number of files that can be opened.
44232 */
44233 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44234 if (nr >= rlimit(RLIMIT_NOFILE))
44235 return -EMFILE;
44236
44237 diff --git a/fs/filesystems.c b/fs/filesystems.c
44238 index 0845f84..7b4ebef 100644
44239 --- a/fs/filesystems.c
44240 +++ b/fs/filesystems.c
44241 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
44242 int len = dot ? dot - name : strlen(name);
44243
44244 fs = __get_fs_type(name, len);
44245 +
44246 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44247 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44248 +#else
44249 if (!fs && (request_module("%.*s", len, name) == 0))
44250 +#endif
44251 fs = __get_fs_type(name, len);
44252
44253 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44254 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44255 index 78b519c..a8b4979 100644
44256 --- a/fs/fs_struct.c
44257 +++ b/fs/fs_struct.c
44258 @@ -4,6 +4,7 @@
44259 #include <linux/path.h>
44260 #include <linux/slab.h>
44261 #include <linux/fs_struct.h>
44262 +#include <linux/grsecurity.h>
44263 #include "internal.h"
44264
44265 static inline void path_get_longterm(struct path *path)
44266 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44267 old_root = fs->root;
44268 fs->root = *path;
44269 path_get_longterm(path);
44270 + gr_set_chroot_entries(current, path);
44271 write_seqcount_end(&fs->seq);
44272 spin_unlock(&fs->lock);
44273 if (old_root.dentry)
44274 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44275 && fs->root.mnt == old_root->mnt) {
44276 path_get_longterm(new_root);
44277 fs->root = *new_root;
44278 + gr_set_chroot_entries(p, new_root);
44279 count++;
44280 }
44281 if (fs->pwd.dentry == old_root->dentry
44282 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44283 spin_lock(&fs->lock);
44284 write_seqcount_begin(&fs->seq);
44285 tsk->fs = NULL;
44286 - kill = !--fs->users;
44287 + gr_clear_chroot_entries(tsk);
44288 + kill = !atomic_dec_return(&fs->users);
44289 write_seqcount_end(&fs->seq);
44290 spin_unlock(&fs->lock);
44291 task_unlock(tsk);
44292 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44293 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44294 /* We don't need to lock fs - think why ;-) */
44295 if (fs) {
44296 - fs->users = 1;
44297 + atomic_set(&fs->users, 1);
44298 fs->in_exec = 0;
44299 spin_lock_init(&fs->lock);
44300 seqcount_init(&fs->seq);
44301 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44302 spin_lock(&old->lock);
44303 fs->root = old->root;
44304 path_get_longterm(&fs->root);
44305 + /* instead of calling gr_set_chroot_entries here,
44306 + we call it from every caller of this function
44307 + */
44308 fs->pwd = old->pwd;
44309 path_get_longterm(&fs->pwd);
44310 spin_unlock(&old->lock);
44311 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44312
44313 task_lock(current);
44314 spin_lock(&fs->lock);
44315 - kill = !--fs->users;
44316 + kill = !atomic_dec_return(&fs->users);
44317 current->fs = new_fs;
44318 + gr_set_chroot_entries(current, &new_fs->root);
44319 spin_unlock(&fs->lock);
44320 task_unlock(current);
44321
44322 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44323
44324 int current_umask(void)
44325 {
44326 - return current->fs->umask;
44327 + return current->fs->umask | gr_acl_umask();
44328 }
44329 EXPORT_SYMBOL(current_umask);
44330
44331 /* to be mentioned only in INIT_TASK */
44332 struct fs_struct init_fs = {
44333 - .users = 1,
44334 + .users = ATOMIC_INIT(1),
44335 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44336 .seq = SEQCNT_ZERO,
44337 .umask = 0022,
44338 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44339 task_lock(current);
44340
44341 spin_lock(&init_fs.lock);
44342 - init_fs.users++;
44343 + atomic_inc(&init_fs.users);
44344 spin_unlock(&init_fs.lock);
44345
44346 spin_lock(&fs->lock);
44347 current->fs = &init_fs;
44348 - kill = !--fs->users;
44349 + gr_set_chroot_entries(current, &current->fs->root);
44350 + kill = !atomic_dec_return(&fs->users);
44351 spin_unlock(&fs->lock);
44352
44353 task_unlock(current);
44354 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44355 index 9905350..02eaec4 100644
44356 --- a/fs/fscache/cookie.c
44357 +++ b/fs/fscache/cookie.c
44358 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44359 parent ? (char *) parent->def->name : "<no-parent>",
44360 def->name, netfs_data);
44361
44362 - fscache_stat(&fscache_n_acquires);
44363 + fscache_stat_unchecked(&fscache_n_acquires);
44364
44365 /* if there's no parent cookie, then we don't create one here either */
44366 if (!parent) {
44367 - fscache_stat(&fscache_n_acquires_null);
44368 + fscache_stat_unchecked(&fscache_n_acquires_null);
44369 _leave(" [no parent]");
44370 return NULL;
44371 }
44372 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44373 /* allocate and initialise a cookie */
44374 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44375 if (!cookie) {
44376 - fscache_stat(&fscache_n_acquires_oom);
44377 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44378 _leave(" [ENOMEM]");
44379 return NULL;
44380 }
44381 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44382
44383 switch (cookie->def->type) {
44384 case FSCACHE_COOKIE_TYPE_INDEX:
44385 - fscache_stat(&fscache_n_cookie_index);
44386 + fscache_stat_unchecked(&fscache_n_cookie_index);
44387 break;
44388 case FSCACHE_COOKIE_TYPE_DATAFILE:
44389 - fscache_stat(&fscache_n_cookie_data);
44390 + fscache_stat_unchecked(&fscache_n_cookie_data);
44391 break;
44392 default:
44393 - fscache_stat(&fscache_n_cookie_special);
44394 + fscache_stat_unchecked(&fscache_n_cookie_special);
44395 break;
44396 }
44397
44398 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44399 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44400 atomic_dec(&parent->n_children);
44401 __fscache_cookie_put(cookie);
44402 - fscache_stat(&fscache_n_acquires_nobufs);
44403 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44404 _leave(" = NULL");
44405 return NULL;
44406 }
44407 }
44408
44409 - fscache_stat(&fscache_n_acquires_ok);
44410 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44411 _leave(" = %p", cookie);
44412 return cookie;
44413 }
44414 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44415 cache = fscache_select_cache_for_object(cookie->parent);
44416 if (!cache) {
44417 up_read(&fscache_addremove_sem);
44418 - fscache_stat(&fscache_n_acquires_no_cache);
44419 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44420 _leave(" = -ENOMEDIUM [no cache]");
44421 return -ENOMEDIUM;
44422 }
44423 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44424 object = cache->ops->alloc_object(cache, cookie);
44425 fscache_stat_d(&fscache_n_cop_alloc_object);
44426 if (IS_ERR(object)) {
44427 - fscache_stat(&fscache_n_object_no_alloc);
44428 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44429 ret = PTR_ERR(object);
44430 goto error;
44431 }
44432
44433 - fscache_stat(&fscache_n_object_alloc);
44434 + fscache_stat_unchecked(&fscache_n_object_alloc);
44435
44436 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44437
44438 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44439 struct fscache_object *object;
44440 struct hlist_node *_p;
44441
44442 - fscache_stat(&fscache_n_updates);
44443 + fscache_stat_unchecked(&fscache_n_updates);
44444
44445 if (!cookie) {
44446 - fscache_stat(&fscache_n_updates_null);
44447 + fscache_stat_unchecked(&fscache_n_updates_null);
44448 _leave(" [no cookie]");
44449 return;
44450 }
44451 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44452 struct fscache_object *object;
44453 unsigned long event;
44454
44455 - fscache_stat(&fscache_n_relinquishes);
44456 + fscache_stat_unchecked(&fscache_n_relinquishes);
44457 if (retire)
44458 - fscache_stat(&fscache_n_relinquishes_retire);
44459 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44460
44461 if (!cookie) {
44462 - fscache_stat(&fscache_n_relinquishes_null);
44463 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44464 _leave(" [no cookie]");
44465 return;
44466 }
44467 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44468
44469 /* wait for the cookie to finish being instantiated (or to fail) */
44470 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44471 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44472 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44473 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44474 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44475 }
44476 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44477 index f6aad48..88dcf26 100644
44478 --- a/fs/fscache/internal.h
44479 +++ b/fs/fscache/internal.h
44480 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44481 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44482 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44483
44484 -extern atomic_t fscache_n_op_pend;
44485 -extern atomic_t fscache_n_op_run;
44486 -extern atomic_t fscache_n_op_enqueue;
44487 -extern atomic_t fscache_n_op_deferred_release;
44488 -extern atomic_t fscache_n_op_release;
44489 -extern atomic_t fscache_n_op_gc;
44490 -extern atomic_t fscache_n_op_cancelled;
44491 -extern atomic_t fscache_n_op_rejected;
44492 +extern atomic_unchecked_t fscache_n_op_pend;
44493 +extern atomic_unchecked_t fscache_n_op_run;
44494 +extern atomic_unchecked_t fscache_n_op_enqueue;
44495 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44496 +extern atomic_unchecked_t fscache_n_op_release;
44497 +extern atomic_unchecked_t fscache_n_op_gc;
44498 +extern atomic_unchecked_t fscache_n_op_cancelled;
44499 +extern atomic_unchecked_t fscache_n_op_rejected;
44500
44501 -extern atomic_t fscache_n_attr_changed;
44502 -extern atomic_t fscache_n_attr_changed_ok;
44503 -extern atomic_t fscache_n_attr_changed_nobufs;
44504 -extern atomic_t fscache_n_attr_changed_nomem;
44505 -extern atomic_t fscache_n_attr_changed_calls;
44506 +extern atomic_unchecked_t fscache_n_attr_changed;
44507 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44508 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44509 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44510 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44511
44512 -extern atomic_t fscache_n_allocs;
44513 -extern atomic_t fscache_n_allocs_ok;
44514 -extern atomic_t fscache_n_allocs_wait;
44515 -extern atomic_t fscache_n_allocs_nobufs;
44516 -extern atomic_t fscache_n_allocs_intr;
44517 -extern atomic_t fscache_n_allocs_object_dead;
44518 -extern atomic_t fscache_n_alloc_ops;
44519 -extern atomic_t fscache_n_alloc_op_waits;
44520 +extern atomic_unchecked_t fscache_n_allocs;
44521 +extern atomic_unchecked_t fscache_n_allocs_ok;
44522 +extern atomic_unchecked_t fscache_n_allocs_wait;
44523 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44524 +extern atomic_unchecked_t fscache_n_allocs_intr;
44525 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44526 +extern atomic_unchecked_t fscache_n_alloc_ops;
44527 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44528
44529 -extern atomic_t fscache_n_retrievals;
44530 -extern atomic_t fscache_n_retrievals_ok;
44531 -extern atomic_t fscache_n_retrievals_wait;
44532 -extern atomic_t fscache_n_retrievals_nodata;
44533 -extern atomic_t fscache_n_retrievals_nobufs;
44534 -extern atomic_t fscache_n_retrievals_intr;
44535 -extern atomic_t fscache_n_retrievals_nomem;
44536 -extern atomic_t fscache_n_retrievals_object_dead;
44537 -extern atomic_t fscache_n_retrieval_ops;
44538 -extern atomic_t fscache_n_retrieval_op_waits;
44539 +extern atomic_unchecked_t fscache_n_retrievals;
44540 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44541 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44542 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44543 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44544 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44545 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44546 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44547 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44548 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44549
44550 -extern atomic_t fscache_n_stores;
44551 -extern atomic_t fscache_n_stores_ok;
44552 -extern atomic_t fscache_n_stores_again;
44553 -extern atomic_t fscache_n_stores_nobufs;
44554 -extern atomic_t fscache_n_stores_oom;
44555 -extern atomic_t fscache_n_store_ops;
44556 -extern atomic_t fscache_n_store_calls;
44557 -extern atomic_t fscache_n_store_pages;
44558 -extern atomic_t fscache_n_store_radix_deletes;
44559 -extern atomic_t fscache_n_store_pages_over_limit;
44560 +extern atomic_unchecked_t fscache_n_stores;
44561 +extern atomic_unchecked_t fscache_n_stores_ok;
44562 +extern atomic_unchecked_t fscache_n_stores_again;
44563 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44564 +extern atomic_unchecked_t fscache_n_stores_oom;
44565 +extern atomic_unchecked_t fscache_n_store_ops;
44566 +extern atomic_unchecked_t fscache_n_store_calls;
44567 +extern atomic_unchecked_t fscache_n_store_pages;
44568 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44569 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44570
44571 -extern atomic_t fscache_n_store_vmscan_not_storing;
44572 -extern atomic_t fscache_n_store_vmscan_gone;
44573 -extern atomic_t fscache_n_store_vmscan_busy;
44574 -extern atomic_t fscache_n_store_vmscan_cancelled;
44575 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44576 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44577 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44578 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44579
44580 -extern atomic_t fscache_n_marks;
44581 -extern atomic_t fscache_n_uncaches;
44582 +extern atomic_unchecked_t fscache_n_marks;
44583 +extern atomic_unchecked_t fscache_n_uncaches;
44584
44585 -extern atomic_t fscache_n_acquires;
44586 -extern atomic_t fscache_n_acquires_null;
44587 -extern atomic_t fscache_n_acquires_no_cache;
44588 -extern atomic_t fscache_n_acquires_ok;
44589 -extern atomic_t fscache_n_acquires_nobufs;
44590 -extern atomic_t fscache_n_acquires_oom;
44591 +extern atomic_unchecked_t fscache_n_acquires;
44592 +extern atomic_unchecked_t fscache_n_acquires_null;
44593 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44594 +extern atomic_unchecked_t fscache_n_acquires_ok;
44595 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44596 +extern atomic_unchecked_t fscache_n_acquires_oom;
44597
44598 -extern atomic_t fscache_n_updates;
44599 -extern atomic_t fscache_n_updates_null;
44600 -extern atomic_t fscache_n_updates_run;
44601 +extern atomic_unchecked_t fscache_n_updates;
44602 +extern atomic_unchecked_t fscache_n_updates_null;
44603 +extern atomic_unchecked_t fscache_n_updates_run;
44604
44605 -extern atomic_t fscache_n_relinquishes;
44606 -extern atomic_t fscache_n_relinquishes_null;
44607 -extern atomic_t fscache_n_relinquishes_waitcrt;
44608 -extern atomic_t fscache_n_relinquishes_retire;
44609 +extern atomic_unchecked_t fscache_n_relinquishes;
44610 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44611 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44612 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44613
44614 -extern atomic_t fscache_n_cookie_index;
44615 -extern atomic_t fscache_n_cookie_data;
44616 -extern atomic_t fscache_n_cookie_special;
44617 +extern atomic_unchecked_t fscache_n_cookie_index;
44618 +extern atomic_unchecked_t fscache_n_cookie_data;
44619 +extern atomic_unchecked_t fscache_n_cookie_special;
44620
44621 -extern atomic_t fscache_n_object_alloc;
44622 -extern atomic_t fscache_n_object_no_alloc;
44623 -extern atomic_t fscache_n_object_lookups;
44624 -extern atomic_t fscache_n_object_lookups_negative;
44625 -extern atomic_t fscache_n_object_lookups_positive;
44626 -extern atomic_t fscache_n_object_lookups_timed_out;
44627 -extern atomic_t fscache_n_object_created;
44628 -extern atomic_t fscache_n_object_avail;
44629 -extern atomic_t fscache_n_object_dead;
44630 +extern atomic_unchecked_t fscache_n_object_alloc;
44631 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44632 +extern atomic_unchecked_t fscache_n_object_lookups;
44633 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44634 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44635 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44636 +extern atomic_unchecked_t fscache_n_object_created;
44637 +extern atomic_unchecked_t fscache_n_object_avail;
44638 +extern atomic_unchecked_t fscache_n_object_dead;
44639
44640 -extern atomic_t fscache_n_checkaux_none;
44641 -extern atomic_t fscache_n_checkaux_okay;
44642 -extern atomic_t fscache_n_checkaux_update;
44643 -extern atomic_t fscache_n_checkaux_obsolete;
44644 +extern atomic_unchecked_t fscache_n_checkaux_none;
44645 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44646 +extern atomic_unchecked_t fscache_n_checkaux_update;
44647 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44648
44649 extern atomic_t fscache_n_cop_alloc_object;
44650 extern atomic_t fscache_n_cop_lookup_object;
44651 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44652 atomic_inc(stat);
44653 }
44654
44655 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44656 +{
44657 + atomic_inc_unchecked(stat);
44658 +}
44659 +
44660 static inline void fscache_stat_d(atomic_t *stat)
44661 {
44662 atomic_dec(stat);
44663 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44664
44665 #define __fscache_stat(stat) (NULL)
44666 #define fscache_stat(stat) do {} while (0)
44667 +#define fscache_stat_unchecked(stat) do {} while (0)
44668 #define fscache_stat_d(stat) do {} while (0)
44669 #endif
44670
44671 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44672 index b6b897c..0ffff9c 100644
44673 --- a/fs/fscache/object.c
44674 +++ b/fs/fscache/object.c
44675 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44676 /* update the object metadata on disk */
44677 case FSCACHE_OBJECT_UPDATING:
44678 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44679 - fscache_stat(&fscache_n_updates_run);
44680 + fscache_stat_unchecked(&fscache_n_updates_run);
44681 fscache_stat(&fscache_n_cop_update_object);
44682 object->cache->ops->update_object(object);
44683 fscache_stat_d(&fscache_n_cop_update_object);
44684 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44685 spin_lock(&object->lock);
44686 object->state = FSCACHE_OBJECT_DEAD;
44687 spin_unlock(&object->lock);
44688 - fscache_stat(&fscache_n_object_dead);
44689 + fscache_stat_unchecked(&fscache_n_object_dead);
44690 goto terminal_transit;
44691
44692 /* handle the parent cache of this object being withdrawn from
44693 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44694 spin_lock(&object->lock);
44695 object->state = FSCACHE_OBJECT_DEAD;
44696 spin_unlock(&object->lock);
44697 - fscache_stat(&fscache_n_object_dead);
44698 + fscache_stat_unchecked(&fscache_n_object_dead);
44699 goto terminal_transit;
44700
44701 /* complain about the object being woken up once it is
44702 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44703 parent->cookie->def->name, cookie->def->name,
44704 object->cache->tag->name);
44705
44706 - fscache_stat(&fscache_n_object_lookups);
44707 + fscache_stat_unchecked(&fscache_n_object_lookups);
44708 fscache_stat(&fscache_n_cop_lookup_object);
44709 ret = object->cache->ops->lookup_object(object);
44710 fscache_stat_d(&fscache_n_cop_lookup_object);
44711 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44712 if (ret == -ETIMEDOUT) {
44713 /* probably stuck behind another object, so move this one to
44714 * the back of the queue */
44715 - fscache_stat(&fscache_n_object_lookups_timed_out);
44716 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44717 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44718 }
44719
44720 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44721
44722 spin_lock(&object->lock);
44723 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44724 - fscache_stat(&fscache_n_object_lookups_negative);
44725 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44726
44727 /* transit here to allow write requests to begin stacking up
44728 * and read requests to begin returning ENODATA */
44729 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44730 * result, in which case there may be data available */
44731 spin_lock(&object->lock);
44732 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44733 - fscache_stat(&fscache_n_object_lookups_positive);
44734 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44735
44736 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44737
44738 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44739 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44740 } else {
44741 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44742 - fscache_stat(&fscache_n_object_created);
44743 + fscache_stat_unchecked(&fscache_n_object_created);
44744
44745 object->state = FSCACHE_OBJECT_AVAILABLE;
44746 spin_unlock(&object->lock);
44747 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44748 fscache_enqueue_dependents(object);
44749
44750 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44751 - fscache_stat(&fscache_n_object_avail);
44752 + fscache_stat_unchecked(&fscache_n_object_avail);
44753
44754 _leave("");
44755 }
44756 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44757 enum fscache_checkaux result;
44758
44759 if (!object->cookie->def->check_aux) {
44760 - fscache_stat(&fscache_n_checkaux_none);
44761 + fscache_stat_unchecked(&fscache_n_checkaux_none);
44762 return FSCACHE_CHECKAUX_OKAY;
44763 }
44764
44765 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44766 switch (result) {
44767 /* entry okay as is */
44768 case FSCACHE_CHECKAUX_OKAY:
44769 - fscache_stat(&fscache_n_checkaux_okay);
44770 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
44771 break;
44772
44773 /* entry requires update */
44774 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
44775 - fscache_stat(&fscache_n_checkaux_update);
44776 + fscache_stat_unchecked(&fscache_n_checkaux_update);
44777 break;
44778
44779 /* entry requires deletion */
44780 case FSCACHE_CHECKAUX_OBSOLETE:
44781 - fscache_stat(&fscache_n_checkaux_obsolete);
44782 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
44783 break;
44784
44785 default:
44786 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
44787 index 30afdfa..2256596 100644
44788 --- a/fs/fscache/operation.c
44789 +++ b/fs/fscache/operation.c
44790 @@ -17,7 +17,7 @@
44791 #include <linux/slab.h>
44792 #include "internal.h"
44793
44794 -atomic_t fscache_op_debug_id;
44795 +atomic_unchecked_t fscache_op_debug_id;
44796 EXPORT_SYMBOL(fscache_op_debug_id);
44797
44798 /**
44799 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
44800 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
44801 ASSERTCMP(atomic_read(&op->usage), >, 0);
44802
44803 - fscache_stat(&fscache_n_op_enqueue);
44804 + fscache_stat_unchecked(&fscache_n_op_enqueue);
44805 switch (op->flags & FSCACHE_OP_TYPE) {
44806 case FSCACHE_OP_ASYNC:
44807 _debug("queue async");
44808 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
44809 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
44810 if (op->processor)
44811 fscache_enqueue_operation(op);
44812 - fscache_stat(&fscache_n_op_run);
44813 + fscache_stat_unchecked(&fscache_n_op_run);
44814 }
44815
44816 /*
44817 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44818 if (object->n_ops > 1) {
44819 atomic_inc(&op->usage);
44820 list_add_tail(&op->pend_link, &object->pending_ops);
44821 - fscache_stat(&fscache_n_op_pend);
44822 + fscache_stat_unchecked(&fscache_n_op_pend);
44823 } else if (!list_empty(&object->pending_ops)) {
44824 atomic_inc(&op->usage);
44825 list_add_tail(&op->pend_link, &object->pending_ops);
44826 - fscache_stat(&fscache_n_op_pend);
44827 + fscache_stat_unchecked(&fscache_n_op_pend);
44828 fscache_start_operations(object);
44829 } else {
44830 ASSERTCMP(object->n_in_progress, ==, 0);
44831 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44832 object->n_exclusive++; /* reads and writes must wait */
44833 atomic_inc(&op->usage);
44834 list_add_tail(&op->pend_link, &object->pending_ops);
44835 - fscache_stat(&fscache_n_op_pend);
44836 + fscache_stat_unchecked(&fscache_n_op_pend);
44837 ret = 0;
44838 } else {
44839 /* not allowed to submit ops in any other state */
44840 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
44841 if (object->n_exclusive > 0) {
44842 atomic_inc(&op->usage);
44843 list_add_tail(&op->pend_link, &object->pending_ops);
44844 - fscache_stat(&fscache_n_op_pend);
44845 + fscache_stat_unchecked(&fscache_n_op_pend);
44846 } else if (!list_empty(&object->pending_ops)) {
44847 atomic_inc(&op->usage);
44848 list_add_tail(&op->pend_link, &object->pending_ops);
44849 - fscache_stat(&fscache_n_op_pend);
44850 + fscache_stat_unchecked(&fscache_n_op_pend);
44851 fscache_start_operations(object);
44852 } else {
44853 ASSERTCMP(object->n_exclusive, ==, 0);
44854 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
44855 object->n_ops++;
44856 atomic_inc(&op->usage);
44857 list_add_tail(&op->pend_link, &object->pending_ops);
44858 - fscache_stat(&fscache_n_op_pend);
44859 + fscache_stat_unchecked(&fscache_n_op_pend);
44860 ret = 0;
44861 } else if (object->state == FSCACHE_OBJECT_DYING ||
44862 object->state == FSCACHE_OBJECT_LC_DYING ||
44863 object->state == FSCACHE_OBJECT_WITHDRAWING) {
44864 - fscache_stat(&fscache_n_op_rejected);
44865 + fscache_stat_unchecked(&fscache_n_op_rejected);
44866 ret = -ENOBUFS;
44867 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
44868 fscache_report_unexpected_submission(object, op, ostate);
44869 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
44870
44871 ret = -EBUSY;
44872 if (!list_empty(&op->pend_link)) {
44873 - fscache_stat(&fscache_n_op_cancelled);
44874 + fscache_stat_unchecked(&fscache_n_op_cancelled);
44875 list_del_init(&op->pend_link);
44876 object->n_ops--;
44877 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
44878 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
44879 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
44880 BUG();
44881
44882 - fscache_stat(&fscache_n_op_release);
44883 + fscache_stat_unchecked(&fscache_n_op_release);
44884
44885 if (op->release) {
44886 op->release(op);
44887 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
44888 * lock, and defer it otherwise */
44889 if (!spin_trylock(&object->lock)) {
44890 _debug("defer put");
44891 - fscache_stat(&fscache_n_op_deferred_release);
44892 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
44893
44894 cache = object->cache;
44895 spin_lock(&cache->op_gc_list_lock);
44896 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
44897
44898 _debug("GC DEFERRED REL OBJ%x OP%x",
44899 object->debug_id, op->debug_id);
44900 - fscache_stat(&fscache_n_op_gc);
44901 + fscache_stat_unchecked(&fscache_n_op_gc);
44902
44903 ASSERTCMP(atomic_read(&op->usage), ==, 0);
44904
44905 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
44906 index 3f7a59b..cf196cc 100644
44907 --- a/fs/fscache/page.c
44908 +++ b/fs/fscache/page.c
44909 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44910 val = radix_tree_lookup(&cookie->stores, page->index);
44911 if (!val) {
44912 rcu_read_unlock();
44913 - fscache_stat(&fscache_n_store_vmscan_not_storing);
44914 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
44915 __fscache_uncache_page(cookie, page);
44916 return true;
44917 }
44918 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
44919 spin_unlock(&cookie->stores_lock);
44920
44921 if (xpage) {
44922 - fscache_stat(&fscache_n_store_vmscan_cancelled);
44923 - fscache_stat(&fscache_n_store_radix_deletes);
44924 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
44925 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44926 ASSERTCMP(xpage, ==, page);
44927 } else {
44928 - fscache_stat(&fscache_n_store_vmscan_gone);
44929 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
44930 }
44931
44932 wake_up_bit(&cookie->flags, 0);
44933 @@ -107,7 +107,7 @@ page_busy:
44934 /* we might want to wait here, but that could deadlock the allocator as
44935 * the work threads writing to the cache may all end up sleeping
44936 * on memory allocation */
44937 - fscache_stat(&fscache_n_store_vmscan_busy);
44938 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
44939 return false;
44940 }
44941 EXPORT_SYMBOL(__fscache_maybe_release_page);
44942 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
44943 FSCACHE_COOKIE_STORING_TAG);
44944 if (!radix_tree_tag_get(&cookie->stores, page->index,
44945 FSCACHE_COOKIE_PENDING_TAG)) {
44946 - fscache_stat(&fscache_n_store_radix_deletes);
44947 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
44948 xpage = radix_tree_delete(&cookie->stores, page->index);
44949 }
44950 spin_unlock(&cookie->stores_lock);
44951 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
44952
44953 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
44954
44955 - fscache_stat(&fscache_n_attr_changed_calls);
44956 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
44957
44958 if (fscache_object_is_active(object)) {
44959 fscache_stat(&fscache_n_cop_attr_changed);
44960 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44961
44962 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
44963
44964 - fscache_stat(&fscache_n_attr_changed);
44965 + fscache_stat_unchecked(&fscache_n_attr_changed);
44966
44967 op = kzalloc(sizeof(*op), GFP_KERNEL);
44968 if (!op) {
44969 - fscache_stat(&fscache_n_attr_changed_nomem);
44970 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
44971 _leave(" = -ENOMEM");
44972 return -ENOMEM;
44973 }
44974 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44975 if (fscache_submit_exclusive_op(object, op) < 0)
44976 goto nobufs;
44977 spin_unlock(&cookie->lock);
44978 - fscache_stat(&fscache_n_attr_changed_ok);
44979 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
44980 fscache_put_operation(op);
44981 _leave(" = 0");
44982 return 0;
44983 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
44984 nobufs:
44985 spin_unlock(&cookie->lock);
44986 kfree(op);
44987 - fscache_stat(&fscache_n_attr_changed_nobufs);
44988 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
44989 _leave(" = %d", -ENOBUFS);
44990 return -ENOBUFS;
44991 }
44992 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
44993 /* allocate a retrieval operation and attempt to submit it */
44994 op = kzalloc(sizeof(*op), GFP_NOIO);
44995 if (!op) {
44996 - fscache_stat(&fscache_n_retrievals_nomem);
44997 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
44998 return NULL;
44999 }
45000
45001 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45002 return 0;
45003 }
45004
45005 - fscache_stat(&fscache_n_retrievals_wait);
45006 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45007
45008 jif = jiffies;
45009 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45010 fscache_wait_bit_interruptible,
45011 TASK_INTERRUPTIBLE) != 0) {
45012 - fscache_stat(&fscache_n_retrievals_intr);
45013 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45014 _leave(" = -ERESTARTSYS");
45015 return -ERESTARTSYS;
45016 }
45017 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45018 */
45019 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45020 struct fscache_retrieval *op,
45021 - atomic_t *stat_op_waits,
45022 - atomic_t *stat_object_dead)
45023 + atomic_unchecked_t *stat_op_waits,
45024 + atomic_unchecked_t *stat_object_dead)
45025 {
45026 int ret;
45027
45028 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45029 goto check_if_dead;
45030
45031 _debug(">>> WT");
45032 - fscache_stat(stat_op_waits);
45033 + fscache_stat_unchecked(stat_op_waits);
45034 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45035 fscache_wait_bit_interruptible,
45036 TASK_INTERRUPTIBLE) < 0) {
45037 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45038
45039 check_if_dead:
45040 if (unlikely(fscache_object_is_dead(object))) {
45041 - fscache_stat(stat_object_dead);
45042 + fscache_stat_unchecked(stat_object_dead);
45043 return -ENOBUFS;
45044 }
45045 return 0;
45046 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45047
45048 _enter("%p,%p,,,", cookie, page);
45049
45050 - fscache_stat(&fscache_n_retrievals);
45051 + fscache_stat_unchecked(&fscache_n_retrievals);
45052
45053 if (hlist_empty(&cookie->backing_objects))
45054 goto nobufs;
45055 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45056 goto nobufs_unlock;
45057 spin_unlock(&cookie->lock);
45058
45059 - fscache_stat(&fscache_n_retrieval_ops);
45060 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45061
45062 /* pin the netfs read context in case we need to do the actual netfs
45063 * read because we've encountered a cache read failure */
45064 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45065
45066 error:
45067 if (ret == -ENOMEM)
45068 - fscache_stat(&fscache_n_retrievals_nomem);
45069 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45070 else if (ret == -ERESTARTSYS)
45071 - fscache_stat(&fscache_n_retrievals_intr);
45072 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45073 else if (ret == -ENODATA)
45074 - fscache_stat(&fscache_n_retrievals_nodata);
45075 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45076 else if (ret < 0)
45077 - fscache_stat(&fscache_n_retrievals_nobufs);
45078 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45079 else
45080 - fscache_stat(&fscache_n_retrievals_ok);
45081 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45082
45083 fscache_put_retrieval(op);
45084 _leave(" = %d", ret);
45085 @@ -429,7 +429,7 @@ nobufs_unlock:
45086 spin_unlock(&cookie->lock);
45087 kfree(op);
45088 nobufs:
45089 - fscache_stat(&fscache_n_retrievals_nobufs);
45090 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45091 _leave(" = -ENOBUFS");
45092 return -ENOBUFS;
45093 }
45094 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45095
45096 _enter("%p,,%d,,,", cookie, *nr_pages);
45097
45098 - fscache_stat(&fscache_n_retrievals);
45099 + fscache_stat_unchecked(&fscache_n_retrievals);
45100
45101 if (hlist_empty(&cookie->backing_objects))
45102 goto nobufs;
45103 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45104 goto nobufs_unlock;
45105 spin_unlock(&cookie->lock);
45106
45107 - fscache_stat(&fscache_n_retrieval_ops);
45108 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45109
45110 /* pin the netfs read context in case we need to do the actual netfs
45111 * read because we've encountered a cache read failure */
45112 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45113
45114 error:
45115 if (ret == -ENOMEM)
45116 - fscache_stat(&fscache_n_retrievals_nomem);
45117 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45118 else if (ret == -ERESTARTSYS)
45119 - fscache_stat(&fscache_n_retrievals_intr);
45120 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45121 else if (ret == -ENODATA)
45122 - fscache_stat(&fscache_n_retrievals_nodata);
45123 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45124 else if (ret < 0)
45125 - fscache_stat(&fscache_n_retrievals_nobufs);
45126 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45127 else
45128 - fscache_stat(&fscache_n_retrievals_ok);
45129 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45130
45131 fscache_put_retrieval(op);
45132 _leave(" = %d", ret);
45133 @@ -545,7 +545,7 @@ nobufs_unlock:
45134 spin_unlock(&cookie->lock);
45135 kfree(op);
45136 nobufs:
45137 - fscache_stat(&fscache_n_retrievals_nobufs);
45138 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45139 _leave(" = -ENOBUFS");
45140 return -ENOBUFS;
45141 }
45142 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45143
45144 _enter("%p,%p,,,", cookie, page);
45145
45146 - fscache_stat(&fscache_n_allocs);
45147 + fscache_stat_unchecked(&fscache_n_allocs);
45148
45149 if (hlist_empty(&cookie->backing_objects))
45150 goto nobufs;
45151 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45152 goto nobufs_unlock;
45153 spin_unlock(&cookie->lock);
45154
45155 - fscache_stat(&fscache_n_alloc_ops);
45156 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45157
45158 ret = fscache_wait_for_retrieval_activation(
45159 object, op,
45160 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45161
45162 error:
45163 if (ret == -ERESTARTSYS)
45164 - fscache_stat(&fscache_n_allocs_intr);
45165 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45166 else if (ret < 0)
45167 - fscache_stat(&fscache_n_allocs_nobufs);
45168 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45169 else
45170 - fscache_stat(&fscache_n_allocs_ok);
45171 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45172
45173 fscache_put_retrieval(op);
45174 _leave(" = %d", ret);
45175 @@ -625,7 +625,7 @@ nobufs_unlock:
45176 spin_unlock(&cookie->lock);
45177 kfree(op);
45178 nobufs:
45179 - fscache_stat(&fscache_n_allocs_nobufs);
45180 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45181 _leave(" = -ENOBUFS");
45182 return -ENOBUFS;
45183 }
45184 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45185
45186 spin_lock(&cookie->stores_lock);
45187
45188 - fscache_stat(&fscache_n_store_calls);
45189 + fscache_stat_unchecked(&fscache_n_store_calls);
45190
45191 /* find a page to store */
45192 page = NULL;
45193 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45194 page = results[0];
45195 _debug("gang %d [%lx]", n, page->index);
45196 if (page->index > op->store_limit) {
45197 - fscache_stat(&fscache_n_store_pages_over_limit);
45198 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45199 goto superseded;
45200 }
45201
45202 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45203 spin_unlock(&cookie->stores_lock);
45204 spin_unlock(&object->lock);
45205
45206 - fscache_stat(&fscache_n_store_pages);
45207 + fscache_stat_unchecked(&fscache_n_store_pages);
45208 fscache_stat(&fscache_n_cop_write_page);
45209 ret = object->cache->ops->write_page(op, page);
45210 fscache_stat_d(&fscache_n_cop_write_page);
45211 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45212 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45213 ASSERT(PageFsCache(page));
45214
45215 - fscache_stat(&fscache_n_stores);
45216 + fscache_stat_unchecked(&fscache_n_stores);
45217
45218 op = kzalloc(sizeof(*op), GFP_NOIO);
45219 if (!op)
45220 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45221 spin_unlock(&cookie->stores_lock);
45222 spin_unlock(&object->lock);
45223
45224 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45225 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45226 op->store_limit = object->store_limit;
45227
45228 if (fscache_submit_op(object, &op->op) < 0)
45229 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45230
45231 spin_unlock(&cookie->lock);
45232 radix_tree_preload_end();
45233 - fscache_stat(&fscache_n_store_ops);
45234 - fscache_stat(&fscache_n_stores_ok);
45235 + fscache_stat_unchecked(&fscache_n_store_ops);
45236 + fscache_stat_unchecked(&fscache_n_stores_ok);
45237
45238 /* the work queue now carries its own ref on the object */
45239 fscache_put_operation(&op->op);
45240 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45241 return 0;
45242
45243 already_queued:
45244 - fscache_stat(&fscache_n_stores_again);
45245 + fscache_stat_unchecked(&fscache_n_stores_again);
45246 already_pending:
45247 spin_unlock(&cookie->stores_lock);
45248 spin_unlock(&object->lock);
45249 spin_unlock(&cookie->lock);
45250 radix_tree_preload_end();
45251 kfree(op);
45252 - fscache_stat(&fscache_n_stores_ok);
45253 + fscache_stat_unchecked(&fscache_n_stores_ok);
45254 _leave(" = 0");
45255 return 0;
45256
45257 @@ -851,14 +851,14 @@ nobufs:
45258 spin_unlock(&cookie->lock);
45259 radix_tree_preload_end();
45260 kfree(op);
45261 - fscache_stat(&fscache_n_stores_nobufs);
45262 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45263 _leave(" = -ENOBUFS");
45264 return -ENOBUFS;
45265
45266 nomem_free:
45267 kfree(op);
45268 nomem:
45269 - fscache_stat(&fscache_n_stores_oom);
45270 + fscache_stat_unchecked(&fscache_n_stores_oom);
45271 _leave(" = -ENOMEM");
45272 return -ENOMEM;
45273 }
45274 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45275 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45276 ASSERTCMP(page, !=, NULL);
45277
45278 - fscache_stat(&fscache_n_uncaches);
45279 + fscache_stat_unchecked(&fscache_n_uncaches);
45280
45281 /* cache withdrawal may beat us to it */
45282 if (!PageFsCache(page))
45283 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45284 unsigned long loop;
45285
45286 #ifdef CONFIG_FSCACHE_STATS
45287 - atomic_add(pagevec->nr, &fscache_n_marks);
45288 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45289 #endif
45290
45291 for (loop = 0; loop < pagevec->nr; loop++) {
45292 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45293 index 4765190..2a067f2 100644
45294 --- a/fs/fscache/stats.c
45295 +++ b/fs/fscache/stats.c
45296 @@ -18,95 +18,95 @@
45297 /*
45298 * operation counters
45299 */
45300 -atomic_t fscache_n_op_pend;
45301 -atomic_t fscache_n_op_run;
45302 -atomic_t fscache_n_op_enqueue;
45303 -atomic_t fscache_n_op_requeue;
45304 -atomic_t fscache_n_op_deferred_release;
45305 -atomic_t fscache_n_op_release;
45306 -atomic_t fscache_n_op_gc;
45307 -atomic_t fscache_n_op_cancelled;
45308 -atomic_t fscache_n_op_rejected;
45309 +atomic_unchecked_t fscache_n_op_pend;
45310 +atomic_unchecked_t fscache_n_op_run;
45311 +atomic_unchecked_t fscache_n_op_enqueue;
45312 +atomic_unchecked_t fscache_n_op_requeue;
45313 +atomic_unchecked_t fscache_n_op_deferred_release;
45314 +atomic_unchecked_t fscache_n_op_release;
45315 +atomic_unchecked_t fscache_n_op_gc;
45316 +atomic_unchecked_t fscache_n_op_cancelled;
45317 +atomic_unchecked_t fscache_n_op_rejected;
45318
45319 -atomic_t fscache_n_attr_changed;
45320 -atomic_t fscache_n_attr_changed_ok;
45321 -atomic_t fscache_n_attr_changed_nobufs;
45322 -atomic_t fscache_n_attr_changed_nomem;
45323 -atomic_t fscache_n_attr_changed_calls;
45324 +atomic_unchecked_t fscache_n_attr_changed;
45325 +atomic_unchecked_t fscache_n_attr_changed_ok;
45326 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45327 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45328 +atomic_unchecked_t fscache_n_attr_changed_calls;
45329
45330 -atomic_t fscache_n_allocs;
45331 -atomic_t fscache_n_allocs_ok;
45332 -atomic_t fscache_n_allocs_wait;
45333 -atomic_t fscache_n_allocs_nobufs;
45334 -atomic_t fscache_n_allocs_intr;
45335 -atomic_t fscache_n_allocs_object_dead;
45336 -atomic_t fscache_n_alloc_ops;
45337 -atomic_t fscache_n_alloc_op_waits;
45338 +atomic_unchecked_t fscache_n_allocs;
45339 +atomic_unchecked_t fscache_n_allocs_ok;
45340 +atomic_unchecked_t fscache_n_allocs_wait;
45341 +atomic_unchecked_t fscache_n_allocs_nobufs;
45342 +atomic_unchecked_t fscache_n_allocs_intr;
45343 +atomic_unchecked_t fscache_n_allocs_object_dead;
45344 +atomic_unchecked_t fscache_n_alloc_ops;
45345 +atomic_unchecked_t fscache_n_alloc_op_waits;
45346
45347 -atomic_t fscache_n_retrievals;
45348 -atomic_t fscache_n_retrievals_ok;
45349 -atomic_t fscache_n_retrievals_wait;
45350 -atomic_t fscache_n_retrievals_nodata;
45351 -atomic_t fscache_n_retrievals_nobufs;
45352 -atomic_t fscache_n_retrievals_intr;
45353 -atomic_t fscache_n_retrievals_nomem;
45354 -atomic_t fscache_n_retrievals_object_dead;
45355 -atomic_t fscache_n_retrieval_ops;
45356 -atomic_t fscache_n_retrieval_op_waits;
45357 +atomic_unchecked_t fscache_n_retrievals;
45358 +atomic_unchecked_t fscache_n_retrievals_ok;
45359 +atomic_unchecked_t fscache_n_retrievals_wait;
45360 +atomic_unchecked_t fscache_n_retrievals_nodata;
45361 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45362 +atomic_unchecked_t fscache_n_retrievals_intr;
45363 +atomic_unchecked_t fscache_n_retrievals_nomem;
45364 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45365 +atomic_unchecked_t fscache_n_retrieval_ops;
45366 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45367
45368 -atomic_t fscache_n_stores;
45369 -atomic_t fscache_n_stores_ok;
45370 -atomic_t fscache_n_stores_again;
45371 -atomic_t fscache_n_stores_nobufs;
45372 -atomic_t fscache_n_stores_oom;
45373 -atomic_t fscache_n_store_ops;
45374 -atomic_t fscache_n_store_calls;
45375 -atomic_t fscache_n_store_pages;
45376 -atomic_t fscache_n_store_radix_deletes;
45377 -atomic_t fscache_n_store_pages_over_limit;
45378 +atomic_unchecked_t fscache_n_stores;
45379 +atomic_unchecked_t fscache_n_stores_ok;
45380 +atomic_unchecked_t fscache_n_stores_again;
45381 +atomic_unchecked_t fscache_n_stores_nobufs;
45382 +atomic_unchecked_t fscache_n_stores_oom;
45383 +atomic_unchecked_t fscache_n_store_ops;
45384 +atomic_unchecked_t fscache_n_store_calls;
45385 +atomic_unchecked_t fscache_n_store_pages;
45386 +atomic_unchecked_t fscache_n_store_radix_deletes;
45387 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45388
45389 -atomic_t fscache_n_store_vmscan_not_storing;
45390 -atomic_t fscache_n_store_vmscan_gone;
45391 -atomic_t fscache_n_store_vmscan_busy;
45392 -atomic_t fscache_n_store_vmscan_cancelled;
45393 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45394 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45395 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45396 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45397
45398 -atomic_t fscache_n_marks;
45399 -atomic_t fscache_n_uncaches;
45400 +atomic_unchecked_t fscache_n_marks;
45401 +atomic_unchecked_t fscache_n_uncaches;
45402
45403 -atomic_t fscache_n_acquires;
45404 -atomic_t fscache_n_acquires_null;
45405 -atomic_t fscache_n_acquires_no_cache;
45406 -atomic_t fscache_n_acquires_ok;
45407 -atomic_t fscache_n_acquires_nobufs;
45408 -atomic_t fscache_n_acquires_oom;
45409 +atomic_unchecked_t fscache_n_acquires;
45410 +atomic_unchecked_t fscache_n_acquires_null;
45411 +atomic_unchecked_t fscache_n_acquires_no_cache;
45412 +atomic_unchecked_t fscache_n_acquires_ok;
45413 +atomic_unchecked_t fscache_n_acquires_nobufs;
45414 +atomic_unchecked_t fscache_n_acquires_oom;
45415
45416 -atomic_t fscache_n_updates;
45417 -atomic_t fscache_n_updates_null;
45418 -atomic_t fscache_n_updates_run;
45419 +atomic_unchecked_t fscache_n_updates;
45420 +atomic_unchecked_t fscache_n_updates_null;
45421 +atomic_unchecked_t fscache_n_updates_run;
45422
45423 -atomic_t fscache_n_relinquishes;
45424 -atomic_t fscache_n_relinquishes_null;
45425 -atomic_t fscache_n_relinquishes_waitcrt;
45426 -atomic_t fscache_n_relinquishes_retire;
45427 +atomic_unchecked_t fscache_n_relinquishes;
45428 +atomic_unchecked_t fscache_n_relinquishes_null;
45429 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45430 +atomic_unchecked_t fscache_n_relinquishes_retire;
45431
45432 -atomic_t fscache_n_cookie_index;
45433 -atomic_t fscache_n_cookie_data;
45434 -atomic_t fscache_n_cookie_special;
45435 +atomic_unchecked_t fscache_n_cookie_index;
45436 +atomic_unchecked_t fscache_n_cookie_data;
45437 +atomic_unchecked_t fscache_n_cookie_special;
45438
45439 -atomic_t fscache_n_object_alloc;
45440 -atomic_t fscache_n_object_no_alloc;
45441 -atomic_t fscache_n_object_lookups;
45442 -atomic_t fscache_n_object_lookups_negative;
45443 -atomic_t fscache_n_object_lookups_positive;
45444 -atomic_t fscache_n_object_lookups_timed_out;
45445 -atomic_t fscache_n_object_created;
45446 -atomic_t fscache_n_object_avail;
45447 -atomic_t fscache_n_object_dead;
45448 +atomic_unchecked_t fscache_n_object_alloc;
45449 +atomic_unchecked_t fscache_n_object_no_alloc;
45450 +atomic_unchecked_t fscache_n_object_lookups;
45451 +atomic_unchecked_t fscache_n_object_lookups_negative;
45452 +atomic_unchecked_t fscache_n_object_lookups_positive;
45453 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45454 +atomic_unchecked_t fscache_n_object_created;
45455 +atomic_unchecked_t fscache_n_object_avail;
45456 +atomic_unchecked_t fscache_n_object_dead;
45457
45458 -atomic_t fscache_n_checkaux_none;
45459 -atomic_t fscache_n_checkaux_okay;
45460 -atomic_t fscache_n_checkaux_update;
45461 -atomic_t fscache_n_checkaux_obsolete;
45462 +atomic_unchecked_t fscache_n_checkaux_none;
45463 +atomic_unchecked_t fscache_n_checkaux_okay;
45464 +atomic_unchecked_t fscache_n_checkaux_update;
45465 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45466
45467 atomic_t fscache_n_cop_alloc_object;
45468 atomic_t fscache_n_cop_lookup_object;
45469 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45470 seq_puts(m, "FS-Cache statistics\n");
45471
45472 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45473 - atomic_read(&fscache_n_cookie_index),
45474 - atomic_read(&fscache_n_cookie_data),
45475 - atomic_read(&fscache_n_cookie_special));
45476 + atomic_read_unchecked(&fscache_n_cookie_index),
45477 + atomic_read_unchecked(&fscache_n_cookie_data),
45478 + atomic_read_unchecked(&fscache_n_cookie_special));
45479
45480 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45481 - atomic_read(&fscache_n_object_alloc),
45482 - atomic_read(&fscache_n_object_no_alloc),
45483 - atomic_read(&fscache_n_object_avail),
45484 - atomic_read(&fscache_n_object_dead));
45485 + atomic_read_unchecked(&fscache_n_object_alloc),
45486 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45487 + atomic_read_unchecked(&fscache_n_object_avail),
45488 + atomic_read_unchecked(&fscache_n_object_dead));
45489 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45490 - atomic_read(&fscache_n_checkaux_none),
45491 - atomic_read(&fscache_n_checkaux_okay),
45492 - atomic_read(&fscache_n_checkaux_update),
45493 - atomic_read(&fscache_n_checkaux_obsolete));
45494 + atomic_read_unchecked(&fscache_n_checkaux_none),
45495 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45496 + atomic_read_unchecked(&fscache_n_checkaux_update),
45497 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45498
45499 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45500 - atomic_read(&fscache_n_marks),
45501 - atomic_read(&fscache_n_uncaches));
45502 + atomic_read_unchecked(&fscache_n_marks),
45503 + atomic_read_unchecked(&fscache_n_uncaches));
45504
45505 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45506 " oom=%u\n",
45507 - atomic_read(&fscache_n_acquires),
45508 - atomic_read(&fscache_n_acquires_null),
45509 - atomic_read(&fscache_n_acquires_no_cache),
45510 - atomic_read(&fscache_n_acquires_ok),
45511 - atomic_read(&fscache_n_acquires_nobufs),
45512 - atomic_read(&fscache_n_acquires_oom));
45513 + atomic_read_unchecked(&fscache_n_acquires),
45514 + atomic_read_unchecked(&fscache_n_acquires_null),
45515 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45516 + atomic_read_unchecked(&fscache_n_acquires_ok),
45517 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45518 + atomic_read_unchecked(&fscache_n_acquires_oom));
45519
45520 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45521 - atomic_read(&fscache_n_object_lookups),
45522 - atomic_read(&fscache_n_object_lookups_negative),
45523 - atomic_read(&fscache_n_object_lookups_positive),
45524 - atomic_read(&fscache_n_object_created),
45525 - atomic_read(&fscache_n_object_lookups_timed_out));
45526 + atomic_read_unchecked(&fscache_n_object_lookups),
45527 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45528 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45529 + atomic_read_unchecked(&fscache_n_object_created),
45530 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45531
45532 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45533 - atomic_read(&fscache_n_updates),
45534 - atomic_read(&fscache_n_updates_null),
45535 - atomic_read(&fscache_n_updates_run));
45536 + atomic_read_unchecked(&fscache_n_updates),
45537 + atomic_read_unchecked(&fscache_n_updates_null),
45538 + atomic_read_unchecked(&fscache_n_updates_run));
45539
45540 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45541 - atomic_read(&fscache_n_relinquishes),
45542 - atomic_read(&fscache_n_relinquishes_null),
45543 - atomic_read(&fscache_n_relinquishes_waitcrt),
45544 - atomic_read(&fscache_n_relinquishes_retire));
45545 + atomic_read_unchecked(&fscache_n_relinquishes),
45546 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45547 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45548 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45549
45550 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45551 - atomic_read(&fscache_n_attr_changed),
45552 - atomic_read(&fscache_n_attr_changed_ok),
45553 - atomic_read(&fscache_n_attr_changed_nobufs),
45554 - atomic_read(&fscache_n_attr_changed_nomem),
45555 - atomic_read(&fscache_n_attr_changed_calls));
45556 + atomic_read_unchecked(&fscache_n_attr_changed),
45557 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45558 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45559 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45560 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45561
45562 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45563 - atomic_read(&fscache_n_allocs),
45564 - atomic_read(&fscache_n_allocs_ok),
45565 - atomic_read(&fscache_n_allocs_wait),
45566 - atomic_read(&fscache_n_allocs_nobufs),
45567 - atomic_read(&fscache_n_allocs_intr));
45568 + atomic_read_unchecked(&fscache_n_allocs),
45569 + atomic_read_unchecked(&fscache_n_allocs_ok),
45570 + atomic_read_unchecked(&fscache_n_allocs_wait),
45571 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45572 + atomic_read_unchecked(&fscache_n_allocs_intr));
45573 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45574 - atomic_read(&fscache_n_alloc_ops),
45575 - atomic_read(&fscache_n_alloc_op_waits),
45576 - atomic_read(&fscache_n_allocs_object_dead));
45577 + atomic_read_unchecked(&fscache_n_alloc_ops),
45578 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45579 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45580
45581 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45582 " int=%u oom=%u\n",
45583 - atomic_read(&fscache_n_retrievals),
45584 - atomic_read(&fscache_n_retrievals_ok),
45585 - atomic_read(&fscache_n_retrievals_wait),
45586 - atomic_read(&fscache_n_retrievals_nodata),
45587 - atomic_read(&fscache_n_retrievals_nobufs),
45588 - atomic_read(&fscache_n_retrievals_intr),
45589 - atomic_read(&fscache_n_retrievals_nomem));
45590 + atomic_read_unchecked(&fscache_n_retrievals),
45591 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45592 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45593 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45594 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45595 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45596 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45597 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45598 - atomic_read(&fscache_n_retrieval_ops),
45599 - atomic_read(&fscache_n_retrieval_op_waits),
45600 - atomic_read(&fscache_n_retrievals_object_dead));
45601 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45602 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45603 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45604
45605 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45606 - atomic_read(&fscache_n_stores),
45607 - atomic_read(&fscache_n_stores_ok),
45608 - atomic_read(&fscache_n_stores_again),
45609 - atomic_read(&fscache_n_stores_nobufs),
45610 - atomic_read(&fscache_n_stores_oom));
45611 + atomic_read_unchecked(&fscache_n_stores),
45612 + atomic_read_unchecked(&fscache_n_stores_ok),
45613 + atomic_read_unchecked(&fscache_n_stores_again),
45614 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45615 + atomic_read_unchecked(&fscache_n_stores_oom));
45616 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45617 - atomic_read(&fscache_n_store_ops),
45618 - atomic_read(&fscache_n_store_calls),
45619 - atomic_read(&fscache_n_store_pages),
45620 - atomic_read(&fscache_n_store_radix_deletes),
45621 - atomic_read(&fscache_n_store_pages_over_limit));
45622 + atomic_read_unchecked(&fscache_n_store_ops),
45623 + atomic_read_unchecked(&fscache_n_store_calls),
45624 + atomic_read_unchecked(&fscache_n_store_pages),
45625 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45626 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45627
45628 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45629 - atomic_read(&fscache_n_store_vmscan_not_storing),
45630 - atomic_read(&fscache_n_store_vmscan_gone),
45631 - atomic_read(&fscache_n_store_vmscan_busy),
45632 - atomic_read(&fscache_n_store_vmscan_cancelled));
45633 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45634 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45635 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45636 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45637
45638 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45639 - atomic_read(&fscache_n_op_pend),
45640 - atomic_read(&fscache_n_op_run),
45641 - atomic_read(&fscache_n_op_enqueue),
45642 - atomic_read(&fscache_n_op_cancelled),
45643 - atomic_read(&fscache_n_op_rejected));
45644 + atomic_read_unchecked(&fscache_n_op_pend),
45645 + atomic_read_unchecked(&fscache_n_op_run),
45646 + atomic_read_unchecked(&fscache_n_op_enqueue),
45647 + atomic_read_unchecked(&fscache_n_op_cancelled),
45648 + atomic_read_unchecked(&fscache_n_op_rejected));
45649 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45650 - atomic_read(&fscache_n_op_deferred_release),
45651 - atomic_read(&fscache_n_op_release),
45652 - atomic_read(&fscache_n_op_gc));
45653 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45654 + atomic_read_unchecked(&fscache_n_op_release),
45655 + atomic_read_unchecked(&fscache_n_op_gc));
45656
45657 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45658 atomic_read(&fscache_n_cop_alloc_object),
45659 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45660 index 3426521..3b75162 100644
45661 --- a/fs/fuse/cuse.c
45662 +++ b/fs/fuse/cuse.c
45663 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
45664 INIT_LIST_HEAD(&cuse_conntbl[i]);
45665
45666 /* inherit and extend fuse_dev_operations */
45667 - cuse_channel_fops = fuse_dev_operations;
45668 - cuse_channel_fops.owner = THIS_MODULE;
45669 - cuse_channel_fops.open = cuse_channel_open;
45670 - cuse_channel_fops.release = cuse_channel_release;
45671 + pax_open_kernel();
45672 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45673 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45674 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45675 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45676 + pax_close_kernel();
45677
45678 cuse_class = class_create(THIS_MODULE, "cuse");
45679 if (IS_ERR(cuse_class))
45680 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45681 index 2aaf3ea..8e50863 100644
45682 --- a/fs/fuse/dev.c
45683 +++ b/fs/fuse/dev.c
45684 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45685 ret = 0;
45686 pipe_lock(pipe);
45687
45688 - if (!pipe->readers) {
45689 + if (!atomic_read(&pipe->readers)) {
45690 send_sig(SIGPIPE, current, 0);
45691 if (!ret)
45692 ret = -EPIPE;
45693 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45694 index 9f63e49..d8a64c0 100644
45695 --- a/fs/fuse/dir.c
45696 +++ b/fs/fuse/dir.c
45697 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
45698 return link;
45699 }
45700
45701 -static void free_link(char *link)
45702 +static void free_link(const char *link)
45703 {
45704 if (!IS_ERR(link))
45705 free_page((unsigned long) link);
45706 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45707 index cfd4959..a780959 100644
45708 --- a/fs/gfs2/inode.c
45709 +++ b/fs/gfs2/inode.c
45710 @@ -1490,7 +1490,7 @@ out:
45711
45712 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45713 {
45714 - char *s = nd_get_link(nd);
45715 + const char *s = nd_get_link(nd);
45716 if (!IS_ERR(s))
45717 kfree(s);
45718 }
45719 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
45720 index 0be5a78..9cfb853 100644
45721 --- a/fs/hugetlbfs/inode.c
45722 +++ b/fs/hugetlbfs/inode.c
45723 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
45724 .kill_sb = kill_litter_super,
45725 };
45726
45727 -static struct vfsmount *hugetlbfs_vfsmount;
45728 +struct vfsmount *hugetlbfs_vfsmount;
45729
45730 static int can_do_hugetlb_shm(void)
45731 {
45732 diff --git a/fs/inode.c b/fs/inode.c
45733 index ee4e66b..0451521 100644
45734 --- a/fs/inode.c
45735 +++ b/fs/inode.c
45736 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
45737
45738 #ifdef CONFIG_SMP
45739 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
45740 - static atomic_t shared_last_ino;
45741 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
45742 + static atomic_unchecked_t shared_last_ino;
45743 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
45744
45745 res = next - LAST_INO_BATCH;
45746 }
45747 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
45748 index e513f19..2ab1351 100644
45749 --- a/fs/jffs2/erase.c
45750 +++ b/fs/jffs2/erase.c
45751 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
45752 struct jffs2_unknown_node marker = {
45753 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
45754 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45755 - .totlen = cpu_to_je32(c->cleanmarker_size)
45756 + .totlen = cpu_to_je32(c->cleanmarker_size),
45757 + .hdr_crc = cpu_to_je32(0)
45758 };
45759
45760 jffs2_prealloc_raw_node_refs(c, jeb, 1);
45761 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
45762 index b09e51d..e482afa 100644
45763 --- a/fs/jffs2/wbuf.c
45764 +++ b/fs/jffs2/wbuf.c
45765 @@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
45766 {
45767 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
45768 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
45769 - .totlen = constant_cpu_to_je32(8)
45770 + .totlen = constant_cpu_to_je32(8),
45771 + .hdr_crc = constant_cpu_to_je32(0)
45772 };
45773
45774 /*
45775 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
45776 index a44eff0..462e07d 100644
45777 --- a/fs/jfs/super.c
45778 +++ b/fs/jfs/super.c
45779 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
45780
45781 jfs_inode_cachep =
45782 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
45783 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
45784 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
45785 init_once);
45786 if (jfs_inode_cachep == NULL)
45787 return -ENOMEM;
45788 diff --git a/fs/libfs.c b/fs/libfs.c
45789 index f6d411e..e82a08d 100644
45790 --- a/fs/libfs.c
45791 +++ b/fs/libfs.c
45792 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45793
45794 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
45795 struct dentry *next;
45796 + char d_name[sizeof(next->d_iname)];
45797 + const unsigned char *name;
45798 +
45799 next = list_entry(p, struct dentry, d_u.d_child);
45800 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
45801 if (!simple_positive(next)) {
45802 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
45803
45804 spin_unlock(&next->d_lock);
45805 spin_unlock(&dentry->d_lock);
45806 - if (filldir(dirent, next->d_name.name,
45807 + name = next->d_name.name;
45808 + if (name == next->d_iname) {
45809 + memcpy(d_name, name, next->d_name.len);
45810 + name = d_name;
45811 + }
45812 + if (filldir(dirent, name,
45813 next->d_name.len, filp->f_pos,
45814 next->d_inode->i_ino,
45815 dt_type(next->d_inode)) < 0)
45816 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
45817 index 8392cb8..80d6193 100644
45818 --- a/fs/lockd/clntproc.c
45819 +++ b/fs/lockd/clntproc.c
45820 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
45821 /*
45822 * Cookie counter for NLM requests
45823 */
45824 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
45825 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
45826
45827 void nlmclnt_next_cookie(struct nlm_cookie *c)
45828 {
45829 - u32 cookie = atomic_inc_return(&nlm_cookie);
45830 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
45831
45832 memcpy(c->data, &cookie, 4);
45833 c->len=4;
45834 diff --git a/fs/locks.c b/fs/locks.c
45835 index 637694b..f84a121 100644
45836 --- a/fs/locks.c
45837 +++ b/fs/locks.c
45838 @@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
45839 return;
45840
45841 if (filp->f_op && filp->f_op->flock) {
45842 - struct file_lock fl = {
45843 + struct file_lock flock = {
45844 .fl_pid = current->tgid,
45845 .fl_file = filp,
45846 .fl_flags = FL_FLOCK,
45847 .fl_type = F_UNLCK,
45848 .fl_end = OFFSET_MAX,
45849 };
45850 - filp->f_op->flock(filp, F_SETLKW, &fl);
45851 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
45852 - fl.fl_ops->fl_release_private(&fl);
45853 + filp->f_op->flock(filp, F_SETLKW, &flock);
45854 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
45855 + flock.fl_ops->fl_release_private(&flock);
45856 }
45857
45858 lock_flocks();
45859 diff --git a/fs/namei.c b/fs/namei.c
45860 index 744e942..24ef47f 100644
45861 --- a/fs/namei.c
45862 +++ b/fs/namei.c
45863 @@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
45864 if (ret != -EACCES)
45865 return ret;
45866
45867 +#ifdef CONFIG_GRKERNSEC
45868 + /* we'll block if we have to log due to a denied capability use */
45869 + if (mask & MAY_NOT_BLOCK)
45870 + return -ECHILD;
45871 +#endif
45872 +
45873 if (S_ISDIR(inode->i_mode)) {
45874 /* DACs are overridable for directories */
45875 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45876 - return 0;
45877 if (!(mask & MAY_WRITE))
45878 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45879 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45880 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45881 return 0;
45882 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45883 + return 0;
45884 return -EACCES;
45885 }
45886 /*
45887 + * Searching includes executable on directories, else just read.
45888 + */
45889 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45890 + if (mask == MAY_READ)
45891 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
45892 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45893 + return 0;
45894 +
45895 + /*
45896 * Read/write DACs are always overridable.
45897 * Executable DACs are overridable when there is
45898 * at least one exec bit set.
45899 @@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
45900 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
45901 return 0;
45902
45903 - /*
45904 - * Searching includes executable on directories, else just read.
45905 - */
45906 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
45907 - if (mask == MAY_READ)
45908 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
45909 - return 0;
45910 -
45911 return -EACCES;
45912 }
45913
45914 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
45915 return error;
45916 }
45917
45918 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
45919 + dentry->d_inode, dentry, nd->path.mnt)) {
45920 + error = -EACCES;
45921 + *p = ERR_PTR(error); /* no ->put_link(), please */
45922 + path_put(&nd->path);
45923 + return error;
45924 + }
45925 +
45926 nd->last_type = LAST_BIND;
45927 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
45928 error = PTR_ERR(*p);
45929 if (!IS_ERR(*p)) {
45930 - char *s = nd_get_link(nd);
45931 + const char *s = nd_get_link(nd);
45932 error = 0;
45933 if (s)
45934 error = __vfs_follow_link(nd, s);
45935 @@ -1624,6 +1640,21 @@ static int path_lookupat(int dfd, const char *name,
45936 if (!err)
45937 err = complete_walk(nd);
45938
45939 + if (!(nd->flags & LOOKUP_PARENT)) {
45940 +#ifdef CONFIG_GRKERNSEC
45941 + if (flags & LOOKUP_RCU) {
45942 + if (!err)
45943 + path_put(&nd->path);
45944 + err = -ECHILD;
45945 + } else
45946 +#endif
45947 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45948 + if (!err)
45949 + path_put(&nd->path);
45950 + err = -ENOENT;
45951 + }
45952 + }
45953 +
45954 if (!err && nd->flags & LOOKUP_DIRECTORY) {
45955 if (!nd->inode->i_op->lookup) {
45956 path_put(&nd->path);
45957 @@ -1651,6 +1682,15 @@ static int do_path_lookup(int dfd, const char *name,
45958 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
45959
45960 if (likely(!retval)) {
45961 + if (*name != '/' && nd->path.dentry && nd->inode) {
45962 +#ifdef CONFIG_GRKERNSEC
45963 + if (flags & LOOKUP_RCU)
45964 + return -ECHILD;
45965 +#endif
45966 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
45967 + return -ENOENT;
45968 + }
45969 +
45970 if (unlikely(!audit_dummy_context())) {
45971 if (nd->path.dentry && nd->inode)
45972 audit_inode(name, nd->path.dentry);
45973 @@ -2048,6 +2088,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
45974 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
45975 return -EPERM;
45976
45977 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
45978 + return -EPERM;
45979 + if (gr_handle_rawio(inode))
45980 + return -EPERM;
45981 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
45982 + return -EACCES;
45983 +
45984 return 0;
45985 }
45986
45987 @@ -2109,6 +2156,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
45988 error = complete_walk(nd);
45989 if (error)
45990 return ERR_PTR(error);
45991 +#ifdef CONFIG_GRKERNSEC
45992 + if (nd->flags & LOOKUP_RCU) {
45993 + error = -ECHILD;
45994 + goto exit;
45995 + }
45996 +#endif
45997 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
45998 + error = -ENOENT;
45999 + goto exit;
46000 + }
46001 audit_inode(pathname, nd->path.dentry);
46002 if (open_flag & O_CREAT) {
46003 error = -EISDIR;
46004 @@ -2119,6 +2176,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46005 error = complete_walk(nd);
46006 if (error)
46007 return ERR_PTR(error);
46008 +#ifdef CONFIG_GRKERNSEC
46009 + if (nd->flags & LOOKUP_RCU) {
46010 + error = -ECHILD;
46011 + goto exit;
46012 + }
46013 +#endif
46014 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46015 + error = -ENOENT;
46016 + goto exit;
46017 + }
46018 audit_inode(pathname, dir);
46019 goto ok;
46020 }
46021 @@ -2140,6 +2207,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46022 error = complete_walk(nd);
46023 if (error)
46024 return ERR_PTR(-ECHILD);
46025 +#ifdef CONFIG_GRKERNSEC
46026 + if (nd->flags & LOOKUP_RCU) {
46027 + error = -ECHILD;
46028 + goto exit;
46029 + }
46030 +#endif
46031 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46032 + error = -ENOENT;
46033 + goto exit;
46034 + }
46035
46036 error = -ENOTDIR;
46037 if (nd->flags & LOOKUP_DIRECTORY) {
46038 @@ -2180,6 +2257,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46039 /* Negative dentry, just create the file */
46040 if (!dentry->d_inode) {
46041 int mode = op->mode;
46042 +
46043 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46044 + error = -EACCES;
46045 + goto exit_mutex_unlock;
46046 + }
46047 +
46048 if (!IS_POSIXACL(dir->d_inode))
46049 mode &= ~current_umask();
46050 /*
46051 @@ -2203,6 +2286,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46052 error = vfs_create(dir->d_inode, dentry, mode, nd);
46053 if (error)
46054 goto exit_mutex_unlock;
46055 + else
46056 + gr_handle_create(path->dentry, path->mnt);
46057 mutex_unlock(&dir->d_inode->i_mutex);
46058 dput(nd->path.dentry);
46059 nd->path.dentry = dentry;
46060 @@ -2212,6 +2297,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46061 /*
46062 * It already exists.
46063 */
46064 +
46065 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46066 + error = -ENOENT;
46067 + goto exit_mutex_unlock;
46068 + }
46069 +
46070 + /* only check if O_CREAT is specified, all other checks need to go
46071 + into may_open */
46072 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46073 + error = -EACCES;
46074 + goto exit_mutex_unlock;
46075 + }
46076 +
46077 mutex_unlock(&dir->d_inode->i_mutex);
46078 audit_inode(pathname, path->dentry);
46079
46080 @@ -2424,6 +2522,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46081 *path = nd.path;
46082 return dentry;
46083 eexist:
46084 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46085 + dput(dentry);
46086 + dentry = ERR_PTR(-ENOENT);
46087 + goto fail;
46088 + }
46089 dput(dentry);
46090 dentry = ERR_PTR(-EEXIST);
46091 fail:
46092 @@ -2446,6 +2549,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46093 }
46094 EXPORT_SYMBOL(user_path_create);
46095
46096 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46097 +{
46098 + char *tmp = getname(pathname);
46099 + struct dentry *res;
46100 + if (IS_ERR(tmp))
46101 + return ERR_CAST(tmp);
46102 + res = kern_path_create(dfd, tmp, path, is_dir);
46103 + if (IS_ERR(res))
46104 + putname(tmp);
46105 + else
46106 + *to = tmp;
46107 + return res;
46108 +}
46109 +
46110 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
46111 {
46112 int error = may_create(dir, dentry);
46113 @@ -2513,6 +2630,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46114 error = mnt_want_write(path.mnt);
46115 if (error)
46116 goto out_dput;
46117 +
46118 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46119 + error = -EPERM;
46120 + goto out_drop_write;
46121 + }
46122 +
46123 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46124 + error = -EACCES;
46125 + goto out_drop_write;
46126 + }
46127 +
46128 error = security_path_mknod(&path, dentry, mode, dev);
46129 if (error)
46130 goto out_drop_write;
46131 @@ -2530,6 +2658,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46132 }
46133 out_drop_write:
46134 mnt_drop_write(path.mnt);
46135 +
46136 + if (!error)
46137 + gr_handle_create(dentry, path.mnt);
46138 out_dput:
46139 dput(dentry);
46140 mutex_unlock(&path.dentry->d_inode->i_mutex);
46141 @@ -2579,12 +2710,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
46142 error = mnt_want_write(path.mnt);
46143 if (error)
46144 goto out_dput;
46145 +
46146 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46147 + error = -EACCES;
46148 + goto out_drop_write;
46149 + }
46150 +
46151 error = security_path_mkdir(&path, dentry, mode);
46152 if (error)
46153 goto out_drop_write;
46154 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46155 out_drop_write:
46156 mnt_drop_write(path.mnt);
46157 +
46158 + if (!error)
46159 + gr_handle_create(dentry, path.mnt);
46160 out_dput:
46161 dput(dentry);
46162 mutex_unlock(&path.dentry->d_inode->i_mutex);
46163 @@ -2664,6 +2804,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46164 char * name;
46165 struct dentry *dentry;
46166 struct nameidata nd;
46167 + ino_t saved_ino = 0;
46168 + dev_t saved_dev = 0;
46169
46170 error = user_path_parent(dfd, pathname, &nd, &name);
46171 if (error)
46172 @@ -2692,6 +2834,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46173 error = -ENOENT;
46174 goto exit3;
46175 }
46176 +
46177 + saved_ino = dentry->d_inode->i_ino;
46178 + saved_dev = gr_get_dev_from_dentry(dentry);
46179 +
46180 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46181 + error = -EACCES;
46182 + goto exit3;
46183 + }
46184 +
46185 error = mnt_want_write(nd.path.mnt);
46186 if (error)
46187 goto exit3;
46188 @@ -2699,6 +2850,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46189 if (error)
46190 goto exit4;
46191 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46192 + if (!error && (saved_dev || saved_ino))
46193 + gr_handle_delete(saved_ino, saved_dev);
46194 exit4:
46195 mnt_drop_write(nd.path.mnt);
46196 exit3:
46197 @@ -2761,6 +2914,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46198 struct dentry *dentry;
46199 struct nameidata nd;
46200 struct inode *inode = NULL;
46201 + ino_t saved_ino = 0;
46202 + dev_t saved_dev = 0;
46203
46204 error = user_path_parent(dfd, pathname, &nd, &name);
46205 if (error)
46206 @@ -2783,6 +2938,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46207 if (!inode)
46208 goto slashes;
46209 ihold(inode);
46210 +
46211 + if (inode->i_nlink <= 1) {
46212 + saved_ino = inode->i_ino;
46213 + saved_dev = gr_get_dev_from_dentry(dentry);
46214 + }
46215 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46216 + error = -EACCES;
46217 + goto exit2;
46218 + }
46219 +
46220 error = mnt_want_write(nd.path.mnt);
46221 if (error)
46222 goto exit2;
46223 @@ -2790,6 +2955,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46224 if (error)
46225 goto exit3;
46226 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46227 + if (!error && (saved_ino || saved_dev))
46228 + gr_handle_delete(saved_ino, saved_dev);
46229 exit3:
46230 mnt_drop_write(nd.path.mnt);
46231 exit2:
46232 @@ -2865,10 +3032,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46233 error = mnt_want_write(path.mnt);
46234 if (error)
46235 goto out_dput;
46236 +
46237 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46238 + error = -EACCES;
46239 + goto out_drop_write;
46240 + }
46241 +
46242 error = security_path_symlink(&path, dentry, from);
46243 if (error)
46244 goto out_drop_write;
46245 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46246 + if (!error)
46247 + gr_handle_create(dentry, path.mnt);
46248 out_drop_write:
46249 mnt_drop_write(path.mnt);
46250 out_dput:
46251 @@ -2940,6 +3115,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46252 {
46253 struct dentry *new_dentry;
46254 struct path old_path, new_path;
46255 + char *to = NULL;
46256 int how = 0;
46257 int error;
46258
46259 @@ -2963,7 +3139,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46260 if (error)
46261 return error;
46262
46263 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46264 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46265 error = PTR_ERR(new_dentry);
46266 if (IS_ERR(new_dentry))
46267 goto out;
46268 @@ -2974,13 +3150,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46269 error = mnt_want_write(new_path.mnt);
46270 if (error)
46271 goto out_dput;
46272 +
46273 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46274 + old_path.dentry->d_inode,
46275 + old_path.dentry->d_inode->i_mode, to)) {
46276 + error = -EACCES;
46277 + goto out_drop_write;
46278 + }
46279 +
46280 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46281 + old_path.dentry, old_path.mnt, to)) {
46282 + error = -EACCES;
46283 + goto out_drop_write;
46284 + }
46285 +
46286 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46287 if (error)
46288 goto out_drop_write;
46289 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46290 + if (!error)
46291 + gr_handle_create(new_dentry, new_path.mnt);
46292 out_drop_write:
46293 mnt_drop_write(new_path.mnt);
46294 out_dput:
46295 + putname(to);
46296 dput(new_dentry);
46297 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46298 path_put(&new_path);
46299 @@ -3208,6 +3401,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46300 if (new_dentry == trap)
46301 goto exit5;
46302
46303 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46304 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46305 + to);
46306 + if (error)
46307 + goto exit5;
46308 +
46309 error = mnt_want_write(oldnd.path.mnt);
46310 if (error)
46311 goto exit5;
46312 @@ -3217,6 +3416,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46313 goto exit6;
46314 error = vfs_rename(old_dir->d_inode, old_dentry,
46315 new_dir->d_inode, new_dentry);
46316 + if (!error)
46317 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46318 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46319 exit6:
46320 mnt_drop_write(oldnd.path.mnt);
46321 exit5:
46322 @@ -3242,6 +3444,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46323
46324 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46325 {
46326 + char tmpbuf[64];
46327 + const char *newlink;
46328 int len;
46329
46330 len = PTR_ERR(link);
46331 @@ -3251,7 +3455,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46332 len = strlen(link);
46333 if (len > (unsigned) buflen)
46334 len = buflen;
46335 - if (copy_to_user(buffer, link, len))
46336 +
46337 + if (len < sizeof(tmpbuf)) {
46338 + memcpy(tmpbuf, link, len);
46339 + newlink = tmpbuf;
46340 + } else
46341 + newlink = link;
46342 +
46343 + if (copy_to_user(buffer, newlink, len))
46344 len = -EFAULT;
46345 out:
46346 return len;
46347 diff --git a/fs/namespace.c b/fs/namespace.c
46348 index cfc6d44..b4632a5 100644
46349 --- a/fs/namespace.c
46350 +++ b/fs/namespace.c
46351 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
46352 if (!(sb->s_flags & MS_RDONLY))
46353 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46354 up_write(&sb->s_umount);
46355 +
46356 + gr_log_remount(mnt->mnt_devname, retval);
46357 +
46358 return retval;
46359 }
46360
46361 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
46362 br_write_unlock(vfsmount_lock);
46363 up_write(&namespace_sem);
46364 release_mounts(&umount_list);
46365 +
46366 + gr_log_unmount(mnt->mnt_devname, retval);
46367 +
46368 return retval;
46369 }
46370
46371 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46372 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46373 MS_STRICTATIME);
46374
46375 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46376 + retval = -EPERM;
46377 + goto dput_out;
46378 + }
46379 +
46380 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46381 + retval = -EPERM;
46382 + goto dput_out;
46383 + }
46384 +
46385 if (flags & MS_REMOUNT)
46386 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46387 data_page);
46388 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46389 dev_name, data_page);
46390 dput_out:
46391 path_put(&path);
46392 +
46393 + gr_log_mount(dev_name, dir_name, retval);
46394 +
46395 return retval;
46396 }
46397
46398 @@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46399 if (error)
46400 goto out2;
46401
46402 + if (gr_handle_chroot_pivot()) {
46403 + error = -EPERM;
46404 + goto out2;
46405 + }
46406 +
46407 get_fs_root(current->fs, &root);
46408 error = lock_mount(&old);
46409 if (error)
46410 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
46411 index 3db6b82..a57597e 100644
46412 --- a/fs/nfs/blocklayout/blocklayout.c
46413 +++ b/fs/nfs/blocklayout/blocklayout.c
46414 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
46415 */
46416 struct parallel_io {
46417 struct kref refcnt;
46418 - struct rpc_call_ops call_ops;
46419 + rpc_call_ops_no_const call_ops;
46420 void (*pnfs_callback) (void *data);
46421 void *data;
46422 };
46423 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
46424 index 50a15fa..ca113f9 100644
46425 --- a/fs/nfs/inode.c
46426 +++ b/fs/nfs/inode.c
46427 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
46428 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
46429 nfsi->attrtimeo_timestamp = jiffies;
46430
46431 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
46432 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
46433 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
46434 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
46435 else
46436 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
46437 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
46438 }
46439
46440 -static atomic_long_t nfs_attr_generation_counter;
46441 +static atomic_long_unchecked_t nfs_attr_generation_counter;
46442
46443 static unsigned long nfs_read_attr_generation_counter(void)
46444 {
46445 - return atomic_long_read(&nfs_attr_generation_counter);
46446 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
46447 }
46448
46449 unsigned long nfs_inc_attr_generation_counter(void)
46450 {
46451 - return atomic_long_inc_return(&nfs_attr_generation_counter);
46452 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
46453 }
46454
46455 void nfs_fattr_init(struct nfs_fattr *fattr)
46456 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
46457 index 7a2e442..8e544cc 100644
46458 --- a/fs/nfsd/vfs.c
46459 +++ b/fs/nfsd/vfs.c
46460 @@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46461 } else {
46462 oldfs = get_fs();
46463 set_fs(KERNEL_DS);
46464 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
46465 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
46466 set_fs(oldfs);
46467 }
46468
46469 @@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
46470
46471 /* Write the data. */
46472 oldfs = get_fs(); set_fs(KERNEL_DS);
46473 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
46474 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
46475 set_fs(oldfs);
46476 if (host_err < 0)
46477 goto out_nfserr;
46478 @@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
46479 */
46480
46481 oldfs = get_fs(); set_fs(KERNEL_DS);
46482 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
46483 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
46484 set_fs(oldfs);
46485
46486 if (host_err < 0)
46487 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
46488 index 9fde1c0..14e8827 100644
46489 --- a/fs/notify/fanotify/fanotify_user.c
46490 +++ b/fs/notify/fanotify/fanotify_user.c
46491 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
46492 goto out_close_fd;
46493
46494 ret = -EFAULT;
46495 - if (copy_to_user(buf, &fanotify_event_metadata,
46496 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
46497 + copy_to_user(buf, &fanotify_event_metadata,
46498 fanotify_event_metadata.event_len))
46499 goto out_kill_access_response;
46500
46501 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
46502 index ee18815..7aa5d01 100644
46503 --- a/fs/notify/notification.c
46504 +++ b/fs/notify/notification.c
46505 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
46506 * get set to 0 so it will never get 'freed'
46507 */
46508 static struct fsnotify_event *q_overflow_event;
46509 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46510 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46511
46512 /**
46513 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
46514 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
46515 */
46516 u32 fsnotify_get_cookie(void)
46517 {
46518 - return atomic_inc_return(&fsnotify_sync_cookie);
46519 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
46520 }
46521 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
46522
46523 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
46524 index 99e3610..02c1068 100644
46525 --- a/fs/ntfs/dir.c
46526 +++ b/fs/ntfs/dir.c
46527 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
46528 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
46529 ~(s64)(ndir->itype.index.block_size - 1)));
46530 /* Bounds checks. */
46531 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46532 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
46533 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
46534 "inode 0x%lx or driver bug.", vdir->i_ino);
46535 goto err_out;
46536 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
46537 index c587e2d..3641eaa 100644
46538 --- a/fs/ntfs/file.c
46539 +++ b/fs/ntfs/file.c
46540 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
46541 #endif /* NTFS_RW */
46542 };
46543
46544 -const struct file_operations ntfs_empty_file_ops = {};
46545 +const struct file_operations ntfs_empty_file_ops __read_only;
46546
46547 -const struct inode_operations ntfs_empty_inode_ops = {};
46548 +const struct inode_operations ntfs_empty_inode_ops __read_only;
46549 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
46550 index 210c352..a174f83 100644
46551 --- a/fs/ocfs2/localalloc.c
46552 +++ b/fs/ocfs2/localalloc.c
46553 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
46554 goto bail;
46555 }
46556
46557 - atomic_inc(&osb->alloc_stats.moves);
46558 + atomic_inc_unchecked(&osb->alloc_stats.moves);
46559
46560 bail:
46561 if (handle)
46562 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
46563 index d355e6e..578d905 100644
46564 --- a/fs/ocfs2/ocfs2.h
46565 +++ b/fs/ocfs2/ocfs2.h
46566 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
46567
46568 struct ocfs2_alloc_stats
46569 {
46570 - atomic_t moves;
46571 - atomic_t local_data;
46572 - atomic_t bitmap_data;
46573 - atomic_t bg_allocs;
46574 - atomic_t bg_extends;
46575 + atomic_unchecked_t moves;
46576 + atomic_unchecked_t local_data;
46577 + atomic_unchecked_t bitmap_data;
46578 + atomic_unchecked_t bg_allocs;
46579 + atomic_unchecked_t bg_extends;
46580 };
46581
46582 enum ocfs2_local_alloc_state
46583 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
46584 index ba5d97e..c77db25 100644
46585 --- a/fs/ocfs2/suballoc.c
46586 +++ b/fs/ocfs2/suballoc.c
46587 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
46588 mlog_errno(status);
46589 goto bail;
46590 }
46591 - atomic_inc(&osb->alloc_stats.bg_extends);
46592 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
46593
46594 /* You should never ask for this much metadata */
46595 BUG_ON(bits_wanted >
46596 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
46597 mlog_errno(status);
46598 goto bail;
46599 }
46600 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46601 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46602
46603 *suballoc_loc = res.sr_bg_blkno;
46604 *suballoc_bit_start = res.sr_bit_offset;
46605 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
46606 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
46607 res->sr_bits);
46608
46609 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46610 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46611
46612 BUG_ON(res->sr_bits != 1);
46613
46614 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
46615 mlog_errno(status);
46616 goto bail;
46617 }
46618 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46619 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
46620
46621 BUG_ON(res.sr_bits != 1);
46622
46623 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46624 cluster_start,
46625 num_clusters);
46626 if (!status)
46627 - atomic_inc(&osb->alloc_stats.local_data);
46628 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
46629 } else {
46630 if (min_clusters > (osb->bitmap_cpg - 1)) {
46631 /* The only paths asking for contiguousness
46632 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
46633 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
46634 res.sr_bg_blkno,
46635 res.sr_bit_offset);
46636 - atomic_inc(&osb->alloc_stats.bitmap_data);
46637 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
46638 *num_clusters = res.sr_bits;
46639 }
46640 }
46641 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
46642 index 4994f8b..eaab8eb 100644
46643 --- a/fs/ocfs2/super.c
46644 +++ b/fs/ocfs2/super.c
46645 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
46646 "%10s => GlobalAllocs: %d LocalAllocs: %d "
46647 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
46648 "Stats",
46649 - atomic_read(&osb->alloc_stats.bitmap_data),
46650 - atomic_read(&osb->alloc_stats.local_data),
46651 - atomic_read(&osb->alloc_stats.bg_allocs),
46652 - atomic_read(&osb->alloc_stats.moves),
46653 - atomic_read(&osb->alloc_stats.bg_extends));
46654 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
46655 + atomic_read_unchecked(&osb->alloc_stats.local_data),
46656 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
46657 + atomic_read_unchecked(&osb->alloc_stats.moves),
46658 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
46659
46660 out += snprintf(buf + out, len - out,
46661 "%10s => State: %u Descriptor: %llu Size: %u bits "
46662 @@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
46663 spin_lock_init(&osb->osb_xattr_lock);
46664 ocfs2_init_steal_slots(osb);
46665
46666 - atomic_set(&osb->alloc_stats.moves, 0);
46667 - atomic_set(&osb->alloc_stats.local_data, 0);
46668 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
46669 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
46670 - atomic_set(&osb->alloc_stats.bg_extends, 0);
46671 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
46672 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
46673 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
46674 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
46675 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
46676
46677 /* Copy the blockcheck stats from the superblock probe */
46678 osb->osb_ecc_stats = *stats;
46679 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
46680 index 5d22872..523db20 100644
46681 --- a/fs/ocfs2/symlink.c
46682 +++ b/fs/ocfs2/symlink.c
46683 @@ -142,7 +142,7 @@ bail:
46684
46685 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46686 {
46687 - char *link = nd_get_link(nd);
46688 + const char *link = nd_get_link(nd);
46689 if (!IS_ERR(link))
46690 kfree(link);
46691 }
46692 diff --git a/fs/open.c b/fs/open.c
46693 index 22c41b5..78894cf 100644
46694 --- a/fs/open.c
46695 +++ b/fs/open.c
46696 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
46697 error = locks_verify_truncate(inode, NULL, length);
46698 if (!error)
46699 error = security_path_truncate(&path);
46700 +
46701 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
46702 + error = -EACCES;
46703 +
46704 if (!error)
46705 error = do_truncate(path.dentry, length, 0, NULL);
46706
46707 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
46708 if (__mnt_is_readonly(path.mnt))
46709 res = -EROFS;
46710
46711 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
46712 + res = -EACCES;
46713 +
46714 out_path_release:
46715 path_put(&path);
46716 out:
46717 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
46718 if (error)
46719 goto dput_and_out;
46720
46721 + gr_log_chdir(path.dentry, path.mnt);
46722 +
46723 set_fs_pwd(current->fs, &path);
46724
46725 dput_and_out:
46726 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
46727 goto out_putf;
46728
46729 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
46730 +
46731 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
46732 + error = -EPERM;
46733 +
46734 + if (!error)
46735 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
46736 +
46737 if (!error)
46738 set_fs_pwd(current->fs, &file->f_path);
46739 out_putf:
46740 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
46741 if (error)
46742 goto dput_and_out;
46743
46744 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
46745 + goto dput_and_out;
46746 +
46747 set_fs_root(current->fs, &path);
46748 +
46749 + gr_handle_chroot_chdir(&path);
46750 +
46751 error = 0;
46752 dput_and_out:
46753 path_put(&path);
46754 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
46755 if (error)
46756 return error;
46757 mutex_lock(&inode->i_mutex);
46758 +
46759 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
46760 + error = -EACCES;
46761 + goto out_unlock;
46762 + }
46763 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
46764 + error = -EACCES;
46765 + goto out_unlock;
46766 + }
46767 +
46768 error = security_path_chmod(path->dentry, path->mnt, mode);
46769 if (error)
46770 goto out_unlock;
46771 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
46772 int error;
46773 struct iattr newattrs;
46774
46775 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
46776 + return -EACCES;
46777 +
46778 newattrs.ia_valid = ATTR_CTIME;
46779 if (user != (uid_t) -1) {
46780 newattrs.ia_valid |= ATTR_UID;
46781 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
46782 index 6296b40..417c00f 100644
46783 --- a/fs/partitions/efi.c
46784 +++ b/fs/partitions/efi.c
46785 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
46786 if (!gpt)
46787 return NULL;
46788
46789 + if (!le32_to_cpu(gpt->num_partition_entries))
46790 + return NULL;
46791 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
46792 + if (!pte)
46793 + return NULL;
46794 +
46795 count = le32_to_cpu(gpt->num_partition_entries) *
46796 le32_to_cpu(gpt->sizeof_partition_entry);
46797 - if (!count)
46798 - return NULL;
46799 - pte = kzalloc(count, GFP_KERNEL);
46800 - if (!pte)
46801 - return NULL;
46802 -
46803 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
46804 (u8 *) pte,
46805 count) < count) {
46806 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
46807 index bd8ae78..539d250 100644
46808 --- a/fs/partitions/ldm.c
46809 +++ b/fs/partitions/ldm.c
46810 @@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
46811 goto found;
46812 }
46813
46814 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
46815 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
46816 if (!f) {
46817 ldm_crit ("Out of memory.");
46818 return false;
46819 diff --git a/fs/pipe.c b/fs/pipe.c
46820 index 4065f07..68c0706 100644
46821 --- a/fs/pipe.c
46822 +++ b/fs/pipe.c
46823 @@ -420,9 +420,9 @@ redo:
46824 }
46825 if (bufs) /* More to do? */
46826 continue;
46827 - if (!pipe->writers)
46828 + if (!atomic_read(&pipe->writers))
46829 break;
46830 - if (!pipe->waiting_writers) {
46831 + if (!atomic_read(&pipe->waiting_writers)) {
46832 /* syscall merging: Usually we must not sleep
46833 * if O_NONBLOCK is set, or if we got some data.
46834 * But if a writer sleeps in kernel space, then
46835 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
46836 mutex_lock(&inode->i_mutex);
46837 pipe = inode->i_pipe;
46838
46839 - if (!pipe->readers) {
46840 + if (!atomic_read(&pipe->readers)) {
46841 send_sig(SIGPIPE, current, 0);
46842 ret = -EPIPE;
46843 goto out;
46844 @@ -530,7 +530,7 @@ redo1:
46845 for (;;) {
46846 int bufs;
46847
46848 - if (!pipe->readers) {
46849 + if (!atomic_read(&pipe->readers)) {
46850 send_sig(SIGPIPE, current, 0);
46851 if (!ret)
46852 ret = -EPIPE;
46853 @@ -616,9 +616,9 @@ redo2:
46854 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
46855 do_wakeup = 0;
46856 }
46857 - pipe->waiting_writers++;
46858 + atomic_inc(&pipe->waiting_writers);
46859 pipe_wait(pipe);
46860 - pipe->waiting_writers--;
46861 + atomic_dec(&pipe->waiting_writers);
46862 }
46863 out:
46864 mutex_unlock(&inode->i_mutex);
46865 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46866 mask = 0;
46867 if (filp->f_mode & FMODE_READ) {
46868 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
46869 - if (!pipe->writers && filp->f_version != pipe->w_counter)
46870 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
46871 mask |= POLLHUP;
46872 }
46873
46874 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
46875 * Most Unices do not set POLLERR for FIFOs but on Linux they
46876 * behave exactly like pipes for poll().
46877 */
46878 - if (!pipe->readers)
46879 + if (!atomic_read(&pipe->readers))
46880 mask |= POLLERR;
46881 }
46882
46883 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
46884
46885 mutex_lock(&inode->i_mutex);
46886 pipe = inode->i_pipe;
46887 - pipe->readers -= decr;
46888 - pipe->writers -= decw;
46889 + atomic_sub(decr, &pipe->readers);
46890 + atomic_sub(decw, &pipe->writers);
46891
46892 - if (!pipe->readers && !pipe->writers) {
46893 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
46894 free_pipe_info(inode);
46895 } else {
46896 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
46897 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
46898
46899 if (inode->i_pipe) {
46900 ret = 0;
46901 - inode->i_pipe->readers++;
46902 + atomic_inc(&inode->i_pipe->readers);
46903 }
46904
46905 mutex_unlock(&inode->i_mutex);
46906 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
46907
46908 if (inode->i_pipe) {
46909 ret = 0;
46910 - inode->i_pipe->writers++;
46911 + atomic_inc(&inode->i_pipe->writers);
46912 }
46913
46914 mutex_unlock(&inode->i_mutex);
46915 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
46916 if (inode->i_pipe) {
46917 ret = 0;
46918 if (filp->f_mode & FMODE_READ)
46919 - inode->i_pipe->readers++;
46920 + atomic_inc(&inode->i_pipe->readers);
46921 if (filp->f_mode & FMODE_WRITE)
46922 - inode->i_pipe->writers++;
46923 + atomic_inc(&inode->i_pipe->writers);
46924 }
46925
46926 mutex_unlock(&inode->i_mutex);
46927 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
46928 inode->i_pipe = NULL;
46929 }
46930
46931 -static struct vfsmount *pipe_mnt __read_mostly;
46932 +struct vfsmount *pipe_mnt __read_mostly;
46933
46934 /*
46935 * pipefs_dname() is called from d_path().
46936 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
46937 goto fail_iput;
46938 inode->i_pipe = pipe;
46939
46940 - pipe->readers = pipe->writers = 1;
46941 + atomic_set(&pipe->readers, 1);
46942 + atomic_set(&pipe->writers, 1);
46943 inode->i_fop = &rdwr_pipefifo_fops;
46944
46945 /*
46946 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
46947 index 15af622..0e9f4467 100644
46948 --- a/fs/proc/Kconfig
46949 +++ b/fs/proc/Kconfig
46950 @@ -30,12 +30,12 @@ config PROC_FS
46951
46952 config PROC_KCORE
46953 bool "/proc/kcore support" if !ARM
46954 - depends on PROC_FS && MMU
46955 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
46956
46957 config PROC_VMCORE
46958 bool "/proc/vmcore support"
46959 - depends on PROC_FS && CRASH_DUMP
46960 - default y
46961 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
46962 + default n
46963 help
46964 Exports the dump image of crashed kernel in ELF format.
46965
46966 @@ -59,8 +59,8 @@ config PROC_SYSCTL
46967 limited in memory.
46968
46969 config PROC_PAGE_MONITOR
46970 - default y
46971 - depends on PROC_FS && MMU
46972 + default n
46973 + depends on PROC_FS && MMU && !GRKERNSEC
46974 bool "Enable /proc page monitoring" if EXPERT
46975 help
46976 Various /proc files exist to monitor process memory utilization:
46977 diff --git a/fs/proc/array.c b/fs/proc/array.c
46978 index 3a1dafd..bf1bd84 100644
46979 --- a/fs/proc/array.c
46980 +++ b/fs/proc/array.c
46981 @@ -60,6 +60,7 @@
46982 #include <linux/tty.h>
46983 #include <linux/string.h>
46984 #include <linux/mman.h>
46985 +#include <linux/grsecurity.h>
46986 #include <linux/proc_fs.h>
46987 #include <linux/ioport.h>
46988 #include <linux/uaccess.h>
46989 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
46990 seq_putc(m, '\n');
46991 }
46992
46993 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46994 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
46995 +{
46996 + if (p->mm)
46997 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
46998 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
46999 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47000 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47001 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47002 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47003 + else
47004 + seq_printf(m, "PaX:\t-----\n");
47005 +}
47006 +#endif
47007 +
47008 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47009 struct pid *pid, struct task_struct *task)
47010 {
47011 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47012 task_cpus_allowed(m, task);
47013 cpuset_task_status_allowed(m, task);
47014 task_context_switch_counts(m, task);
47015 +
47016 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47017 + task_pax(m, task);
47018 +#endif
47019 +
47020 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47021 + task_grsec_rbac(m, task);
47022 +#endif
47023 +
47024 return 0;
47025 }
47026
47027 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47028 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47029 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47030 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47031 +#endif
47032 +
47033 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47034 struct pid *pid, struct task_struct *task, int whole)
47035 {
47036 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47037 char tcomm[sizeof(task->comm)];
47038 unsigned long flags;
47039
47040 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47041 + if (current->exec_id != m->exec_id) {
47042 + gr_log_badprocpid("stat");
47043 + return 0;
47044 + }
47045 +#endif
47046 +
47047 state = *get_task_state(task);
47048 vsize = eip = esp = 0;
47049 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
47050 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47051 gtime = task->gtime;
47052 }
47053
47054 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47055 + if (PAX_RAND_FLAGS(mm)) {
47056 + eip = 0;
47057 + esp = 0;
47058 + wchan = 0;
47059 + }
47060 +#endif
47061 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47062 + wchan = 0;
47063 + eip =0;
47064 + esp =0;
47065 +#endif
47066 +
47067 /* scale priority and nice values from timeslices to -20..20 */
47068 /* to make it look like a "normal" Unix priority/nice value */
47069 priority = task_prio(task);
47070 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47071 vsize,
47072 mm ? get_mm_rss(mm) : 0,
47073 rsslim,
47074 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47075 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
47076 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
47077 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
47078 +#else
47079 mm ? (permitted ? mm->start_code : 1) : 0,
47080 mm ? (permitted ? mm->end_code : 1) : 0,
47081 (permitted && mm) ? mm->start_stack : 0,
47082 +#endif
47083 esp,
47084 eip,
47085 /* The signal information here is obsolete.
47086 @@ -533,8 +590,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47087 struct pid *pid, struct task_struct *task)
47088 {
47089 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47090 - struct mm_struct *mm = get_task_mm(task);
47091 + struct mm_struct *mm;
47092
47093 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47094 + if (current->exec_id != m->exec_id) {
47095 + gr_log_badprocpid("statm");
47096 + return 0;
47097 + }
47098 +#endif
47099 + mm = get_task_mm(task);
47100 if (mm) {
47101 size = task_statm(mm, &shared, &text, &data, &resident);
47102 mmput(mm);
47103 @@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47104
47105 return 0;
47106 }
47107 +
47108 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47109 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47110 +{
47111 + u32 curr_ip = 0;
47112 + unsigned long flags;
47113 +
47114 + if (lock_task_sighand(task, &flags)) {
47115 + curr_ip = task->signal->curr_ip;
47116 + unlock_task_sighand(task, &flags);
47117 + }
47118 +
47119 + return sprintf(buffer, "%pI4\n", &curr_ip);
47120 +}
47121 +#endif
47122 diff --git a/fs/proc/base.c b/fs/proc/base.c
47123 index 1ace83d..f5e575d 100644
47124 --- a/fs/proc/base.c
47125 +++ b/fs/proc/base.c
47126 @@ -107,6 +107,22 @@ struct pid_entry {
47127 union proc_op op;
47128 };
47129
47130 +struct getdents_callback {
47131 + struct linux_dirent __user * current_dir;
47132 + struct linux_dirent __user * previous;
47133 + struct file * file;
47134 + int count;
47135 + int error;
47136 +};
47137 +
47138 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
47139 + loff_t offset, u64 ino, unsigned int d_type)
47140 +{
47141 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
47142 + buf->error = -EINVAL;
47143 + return 0;
47144 +}
47145 +
47146 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47147 .name = (NAME), \
47148 .len = sizeof(NAME) - 1, \
47149 @@ -194,26 +210,6 @@ static int proc_root_link(struct inode *inode, struct path *path)
47150 return result;
47151 }
47152
47153 -static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
47154 -{
47155 - struct mm_struct *mm;
47156 - int err;
47157 -
47158 - err = mutex_lock_killable(&task->signal->cred_guard_mutex);
47159 - if (err)
47160 - return ERR_PTR(err);
47161 -
47162 - mm = get_task_mm(task);
47163 - if (mm && mm != current->mm &&
47164 - !ptrace_may_access(task, mode)) {
47165 - mmput(mm);
47166 - mm = ERR_PTR(-EACCES);
47167 - }
47168 - mutex_unlock(&task->signal->cred_guard_mutex);
47169 -
47170 - return mm;
47171 -}
47172 -
47173 struct mm_struct *mm_for_maps(struct task_struct *task)
47174 {
47175 return mm_access(task, PTRACE_MODE_READ);
47176 @@ -229,6 +225,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47177 if (!mm->arg_end)
47178 goto out_mm; /* Shh! No looking before we're done */
47179
47180 + if (gr_acl_handle_procpidmem(task))
47181 + goto out_mm;
47182 +
47183 len = mm->arg_end - mm->arg_start;
47184
47185 if (len > PAGE_SIZE)
47186 @@ -256,12 +255,28 @@ out:
47187 return res;
47188 }
47189
47190 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47191 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47192 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47193 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47194 +#endif
47195 +
47196 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47197 {
47198 struct mm_struct *mm = mm_for_maps(task);
47199 int res = PTR_ERR(mm);
47200 if (mm && !IS_ERR(mm)) {
47201 unsigned int nwords = 0;
47202 +
47203 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47204 + /* allow if we're currently ptracing this task */
47205 + if (PAX_RAND_FLAGS(mm) &&
47206 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47207 + mmput(mm);
47208 + return 0;
47209 + }
47210 +#endif
47211 +
47212 do {
47213 nwords += 2;
47214 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47215 @@ -275,7 +290,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47216 }
47217
47218
47219 -#ifdef CONFIG_KALLSYMS
47220 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47221 /*
47222 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47223 * Returns the resolved symbol. If that fails, simply return the address.
47224 @@ -314,7 +329,7 @@ static void unlock_trace(struct task_struct *task)
47225 mutex_unlock(&task->signal->cred_guard_mutex);
47226 }
47227
47228 -#ifdef CONFIG_STACKTRACE
47229 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47230
47231 #define MAX_STACK_TRACE_DEPTH 64
47232
47233 @@ -505,7 +520,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47234 return count;
47235 }
47236
47237 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47238 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47239 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47240 {
47241 long nr;
47242 @@ -534,7 +549,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47243 /************************************************************************/
47244
47245 /* permission checks */
47246 -static int proc_fd_access_allowed(struct inode *inode)
47247 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47248 {
47249 struct task_struct *task;
47250 int allowed = 0;
47251 @@ -544,7 +559,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47252 */
47253 task = get_proc_task(inode);
47254 if (task) {
47255 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47256 + if (log)
47257 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
47258 + else
47259 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47260 put_task_struct(task);
47261 }
47262 return allowed;
47263 @@ -786,6 +804,10 @@ static int mem_open(struct inode* inode, struct file* file)
47264 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47265 file->private_data = mm;
47266
47267 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47268 + file->f_version = current->exec_id;
47269 +#endif
47270 +
47271 return 0;
47272 }
47273
47274 @@ -797,6 +819,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47275 ssize_t copied;
47276 char *page;
47277
47278 +#ifdef CONFIG_GRKERNSEC
47279 + if (write)
47280 + return -EPERM;
47281 +#endif
47282 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47283 + if (file->f_version != current->exec_id) {
47284 + gr_log_badprocpid("mem");
47285 + return 0;
47286 + }
47287 +#endif
47288 +
47289 if (!mm)
47290 return 0;
47291
47292 @@ -897,6 +930,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47293 if (!task)
47294 goto out_no_task;
47295
47296 + if (gr_acl_handle_procpidmem(task))
47297 + goto out;
47298 +
47299 ret = -ENOMEM;
47300 page = (char *)__get_free_page(GFP_TEMPORARY);
47301 if (!page)
47302 @@ -1519,7 +1555,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47303 path_put(&nd->path);
47304
47305 /* Are we allowed to snoop on the tasks file descriptors? */
47306 - if (!proc_fd_access_allowed(inode))
47307 + if (!proc_fd_access_allowed(inode,0))
47308 goto out;
47309
47310 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
47311 @@ -1558,8 +1594,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47312 struct path path;
47313
47314 /* Are we allowed to snoop on the tasks file descriptors? */
47315 - if (!proc_fd_access_allowed(inode))
47316 - goto out;
47317 + /* logging this is needed for learning on chromium to work properly,
47318 + but we don't want to flood the logs from 'ps' which does a readlink
47319 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47320 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
47321 + */
47322 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47323 + if (!proc_fd_access_allowed(inode,0))
47324 + goto out;
47325 + } else {
47326 + if (!proc_fd_access_allowed(inode,1))
47327 + goto out;
47328 + }
47329
47330 error = PROC_I(inode)->op.proc_get_link(inode, &path);
47331 if (error)
47332 @@ -1624,7 +1670,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47333 rcu_read_lock();
47334 cred = __task_cred(task);
47335 inode->i_uid = cred->euid;
47336 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47337 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47338 +#else
47339 inode->i_gid = cred->egid;
47340 +#endif
47341 rcu_read_unlock();
47342 }
47343 security_task_to_inode(task, inode);
47344 @@ -1642,6 +1692,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47345 struct inode *inode = dentry->d_inode;
47346 struct task_struct *task;
47347 const struct cred *cred;
47348 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47349 + const struct cred *tmpcred = current_cred();
47350 +#endif
47351
47352 generic_fillattr(inode, stat);
47353
47354 @@ -1649,13 +1702,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47355 stat->uid = 0;
47356 stat->gid = 0;
47357 task = pid_task(proc_pid(inode), PIDTYPE_PID);
47358 +
47359 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
47360 + rcu_read_unlock();
47361 + return -ENOENT;
47362 + }
47363 +
47364 if (task) {
47365 + cred = __task_cred(task);
47366 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47367 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47368 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47369 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47370 +#endif
47371 + ) {
47372 +#endif
47373 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47374 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47375 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47376 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47377 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47378 +#endif
47379 task_dumpable(task)) {
47380 - cred = __task_cred(task);
47381 stat->uid = cred->euid;
47382 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47383 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47384 +#else
47385 stat->gid = cred->egid;
47386 +#endif
47387 }
47388 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47389 + } else {
47390 + rcu_read_unlock();
47391 + return -ENOENT;
47392 + }
47393 +#endif
47394 }
47395 rcu_read_unlock();
47396 return 0;
47397 @@ -1692,11 +1773,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47398
47399 if (task) {
47400 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47401 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47402 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47403 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47404 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47405 +#endif
47406 task_dumpable(task)) {
47407 rcu_read_lock();
47408 cred = __task_cred(task);
47409 inode->i_uid = cred->euid;
47410 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47411 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47412 +#else
47413 inode->i_gid = cred->egid;
47414 +#endif
47415 rcu_read_unlock();
47416 } else {
47417 inode->i_uid = 0;
47418 @@ -1814,7 +1904,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47419 int fd = proc_fd(inode);
47420
47421 if (task) {
47422 - files = get_files_struct(task);
47423 + if (!gr_acl_handle_procpidmem(task))
47424 + files = get_files_struct(task);
47425 put_task_struct(task);
47426 }
47427 if (files) {
47428 @@ -2082,11 +2173,21 @@ static const struct file_operations proc_fd_operations = {
47429 */
47430 static int proc_fd_permission(struct inode *inode, int mask)
47431 {
47432 + struct task_struct *task;
47433 int rv = generic_permission(inode, mask);
47434 - if (rv == 0)
47435 - return 0;
47436 +
47437 if (task_pid(current) == proc_pid(inode))
47438 rv = 0;
47439 +
47440 + task = get_proc_task(inode);
47441 + if (task == NULL)
47442 + return rv;
47443 +
47444 + if (gr_acl_handle_procpidmem(task))
47445 + rv = -EACCES;
47446 +
47447 + put_task_struct(task);
47448 +
47449 return rv;
47450 }
47451
47452 @@ -2196,6 +2297,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
47453 if (!task)
47454 goto out_no_task;
47455
47456 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47457 + goto out;
47458 +
47459 /*
47460 * Yes, it does not scale. And it should not. Don't add
47461 * new entries into /proc/<tgid>/ without very good reasons.
47462 @@ -2240,6 +2344,9 @@ static int proc_pident_readdir(struct file *filp,
47463 if (!task)
47464 goto out_no_task;
47465
47466 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47467 + goto out;
47468 +
47469 ret = 0;
47470 i = filp->f_pos;
47471 switch (i) {
47472 @@ -2510,7 +2617,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
47473 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
47474 void *cookie)
47475 {
47476 - char *s = nd_get_link(nd);
47477 + const char *s = nd_get_link(nd);
47478 if (!IS_ERR(s))
47479 __putname(s);
47480 }
47481 @@ -2708,7 +2815,7 @@ static const struct pid_entry tgid_base_stuff[] = {
47482 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
47483 #endif
47484 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47485 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47486 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47487 INF("syscall", S_IRUGO, proc_pid_syscall),
47488 #endif
47489 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47490 @@ -2733,10 +2840,10 @@ static const struct pid_entry tgid_base_stuff[] = {
47491 #ifdef CONFIG_SECURITY
47492 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47493 #endif
47494 -#ifdef CONFIG_KALLSYMS
47495 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47496 INF("wchan", S_IRUGO, proc_pid_wchan),
47497 #endif
47498 -#ifdef CONFIG_STACKTRACE
47499 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47500 ONE("stack", S_IRUGO, proc_pid_stack),
47501 #endif
47502 #ifdef CONFIG_SCHEDSTATS
47503 @@ -2770,6 +2877,9 @@ static const struct pid_entry tgid_base_stuff[] = {
47504 #ifdef CONFIG_HARDWALL
47505 INF("hardwall", S_IRUGO, proc_pid_hardwall),
47506 #endif
47507 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47508 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
47509 +#endif
47510 };
47511
47512 static int proc_tgid_base_readdir(struct file * filp,
47513 @@ -2895,7 +3005,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
47514 if (!inode)
47515 goto out;
47516
47517 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47518 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
47519 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47520 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47521 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
47522 +#else
47523 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
47524 +#endif
47525 inode->i_op = &proc_tgid_base_inode_operations;
47526 inode->i_fop = &proc_tgid_base_operations;
47527 inode->i_flags|=S_IMMUTABLE;
47528 @@ -2937,7 +3054,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
47529 if (!task)
47530 goto out;
47531
47532 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47533 + goto out_put_task;
47534 +
47535 result = proc_pid_instantiate(dir, dentry, task, NULL);
47536 +out_put_task:
47537 put_task_struct(task);
47538 out:
47539 return result;
47540 @@ -3002,6 +3123,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
47541 {
47542 unsigned int nr;
47543 struct task_struct *reaper;
47544 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47545 + const struct cred *tmpcred = current_cred();
47546 + const struct cred *itercred;
47547 +#endif
47548 + filldir_t __filldir = filldir;
47549 struct tgid_iter iter;
47550 struct pid_namespace *ns;
47551
47552 @@ -3025,8 +3151,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
47553 for (iter = next_tgid(ns, iter);
47554 iter.task;
47555 iter.tgid += 1, iter = next_tgid(ns, iter)) {
47556 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47557 + rcu_read_lock();
47558 + itercred = __task_cred(iter.task);
47559 +#endif
47560 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
47561 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47562 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
47563 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47564 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47565 +#endif
47566 + )
47567 +#endif
47568 + )
47569 + __filldir = &gr_fake_filldir;
47570 + else
47571 + __filldir = filldir;
47572 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47573 + rcu_read_unlock();
47574 +#endif
47575 filp->f_pos = iter.tgid + TGID_OFFSET;
47576 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
47577 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
47578 put_task_struct(iter.task);
47579 goto out;
47580 }
47581 @@ -3054,7 +3199,7 @@ static const struct pid_entry tid_base_stuff[] = {
47582 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
47583 #endif
47584 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
47585 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47586 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47587 INF("syscall", S_IRUGO, proc_pid_syscall),
47588 #endif
47589 INF("cmdline", S_IRUGO, proc_pid_cmdline),
47590 @@ -3078,10 +3223,10 @@ static const struct pid_entry tid_base_stuff[] = {
47591 #ifdef CONFIG_SECURITY
47592 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
47593 #endif
47594 -#ifdef CONFIG_KALLSYMS
47595 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47596 INF("wchan", S_IRUGO, proc_pid_wchan),
47597 #endif
47598 -#ifdef CONFIG_STACKTRACE
47599 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47600 ONE("stack", S_IRUGO, proc_pid_stack),
47601 #endif
47602 #ifdef CONFIG_SCHEDSTATS
47603 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
47604 index 82676e3..5f8518a 100644
47605 --- a/fs/proc/cmdline.c
47606 +++ b/fs/proc/cmdline.c
47607 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
47608
47609 static int __init proc_cmdline_init(void)
47610 {
47611 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47612 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
47613 +#else
47614 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
47615 +#endif
47616 return 0;
47617 }
47618 module_init(proc_cmdline_init);
47619 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
47620 index b143471..bb105e5 100644
47621 --- a/fs/proc/devices.c
47622 +++ b/fs/proc/devices.c
47623 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
47624
47625 static int __init proc_devices_init(void)
47626 {
47627 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47628 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
47629 +#else
47630 proc_create("devices", 0, NULL, &proc_devinfo_operations);
47631 +#endif
47632 return 0;
47633 }
47634 module_init(proc_devices_init);
47635 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
47636 index 7737c54..7172574 100644
47637 --- a/fs/proc/inode.c
47638 +++ b/fs/proc/inode.c
47639 @@ -18,12 +18,18 @@
47640 #include <linux/module.h>
47641 #include <linux/sysctl.h>
47642 #include <linux/slab.h>
47643 +#include <linux/grsecurity.h>
47644
47645 #include <asm/system.h>
47646 #include <asm/uaccess.h>
47647
47648 #include "internal.h"
47649
47650 +#ifdef CONFIG_PROC_SYSCTL
47651 +extern const struct inode_operations proc_sys_inode_operations;
47652 +extern const struct inode_operations proc_sys_dir_operations;
47653 +#endif
47654 +
47655 static void proc_evict_inode(struct inode *inode)
47656 {
47657 struct proc_dir_entry *de;
47658 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
47659 ns_ops = PROC_I(inode)->ns_ops;
47660 if (ns_ops && ns_ops->put)
47661 ns_ops->put(PROC_I(inode)->ns);
47662 +
47663 +#ifdef CONFIG_PROC_SYSCTL
47664 + if (inode->i_op == &proc_sys_inode_operations ||
47665 + inode->i_op == &proc_sys_dir_operations)
47666 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
47667 +#endif
47668 +
47669 }
47670
47671 static struct kmem_cache * proc_inode_cachep;
47672 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
47673 if (de->mode) {
47674 inode->i_mode = de->mode;
47675 inode->i_uid = de->uid;
47676 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47677 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47678 +#else
47679 inode->i_gid = de->gid;
47680 +#endif
47681 }
47682 if (de->size)
47683 inode->i_size = de->size;
47684 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
47685 index 7838e5c..ff92cbc 100644
47686 --- a/fs/proc/internal.h
47687 +++ b/fs/proc/internal.h
47688 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47689 struct pid *pid, struct task_struct *task);
47690 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47691 struct pid *pid, struct task_struct *task);
47692 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47693 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
47694 +#endif
47695 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
47696
47697 extern const struct file_operations proc_maps_operations;
47698 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
47699 index d245cb2..f4e8498 100644
47700 --- a/fs/proc/kcore.c
47701 +++ b/fs/proc/kcore.c
47702 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47703 * the addresses in the elf_phdr on our list.
47704 */
47705 start = kc_offset_to_vaddr(*fpos - elf_buflen);
47706 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
47707 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
47708 + if (tsz > buflen)
47709 tsz = buflen;
47710 -
47711 +
47712 while (buflen) {
47713 struct kcore_list *m;
47714
47715 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47716 kfree(elf_buf);
47717 } else {
47718 if (kern_addr_valid(start)) {
47719 - unsigned long n;
47720 + char *elf_buf;
47721 + mm_segment_t oldfs;
47722
47723 - n = copy_to_user(buffer, (char *)start, tsz);
47724 - /*
47725 - * We cannot distingush between fault on source
47726 - * and fault on destination. When this happens
47727 - * we clear too and hope it will trigger the
47728 - * EFAULT again.
47729 - */
47730 - if (n) {
47731 - if (clear_user(buffer + tsz - n,
47732 - n))
47733 + elf_buf = kmalloc(tsz, GFP_KERNEL);
47734 + if (!elf_buf)
47735 + return -ENOMEM;
47736 + oldfs = get_fs();
47737 + set_fs(KERNEL_DS);
47738 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
47739 + set_fs(oldfs);
47740 + if (copy_to_user(buffer, elf_buf, tsz)) {
47741 + kfree(elf_buf);
47742 return -EFAULT;
47743 + }
47744 }
47745 + set_fs(oldfs);
47746 + kfree(elf_buf);
47747 } else {
47748 if (clear_user(buffer, tsz))
47749 return -EFAULT;
47750 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
47751
47752 static int open_kcore(struct inode *inode, struct file *filp)
47753 {
47754 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47755 + return -EPERM;
47756 +#endif
47757 if (!capable(CAP_SYS_RAWIO))
47758 return -EPERM;
47759 if (kcore_need_update)
47760 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
47761 index 80e4645..53e5fcf 100644
47762 --- a/fs/proc/meminfo.c
47763 +++ b/fs/proc/meminfo.c
47764 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
47765 vmi.used >> 10,
47766 vmi.largest_chunk >> 10
47767 #ifdef CONFIG_MEMORY_FAILURE
47768 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
47769 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
47770 #endif
47771 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
47772 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
47773 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
47774 index b1822dd..df622cb 100644
47775 --- a/fs/proc/nommu.c
47776 +++ b/fs/proc/nommu.c
47777 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
47778 if (len < 1)
47779 len = 1;
47780 seq_printf(m, "%*c", len, ' ');
47781 - seq_path(m, &file->f_path, "");
47782 + seq_path(m, &file->f_path, "\n\\");
47783 }
47784
47785 seq_putc(m, '\n');
47786 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
47787 index f738024..876984a 100644
47788 --- a/fs/proc/proc_net.c
47789 +++ b/fs/proc/proc_net.c
47790 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
47791 struct task_struct *task;
47792 struct nsproxy *ns;
47793 struct net *net = NULL;
47794 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47795 + const struct cred *cred = current_cred();
47796 +#endif
47797 +
47798 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47799 + if (cred->fsuid)
47800 + return net;
47801 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47802 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
47803 + return net;
47804 +#endif
47805
47806 rcu_read_lock();
47807 task = pid_task(proc_pid(dir), PIDTYPE_PID);
47808 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
47809 index a6b6217..1e0579d 100644
47810 --- a/fs/proc/proc_sysctl.c
47811 +++ b/fs/proc/proc_sysctl.c
47812 @@ -9,11 +9,13 @@
47813 #include <linux/namei.h>
47814 #include "internal.h"
47815
47816 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
47817 +
47818 static const struct dentry_operations proc_sys_dentry_operations;
47819 static const struct file_operations proc_sys_file_operations;
47820 -static const struct inode_operations proc_sys_inode_operations;
47821 +const struct inode_operations proc_sys_inode_operations;
47822 static const struct file_operations proc_sys_dir_file_operations;
47823 -static const struct inode_operations proc_sys_dir_operations;
47824 +const struct inode_operations proc_sys_dir_operations;
47825
47826 void proc_sys_poll_notify(struct ctl_table_poll *poll)
47827 {
47828 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
47829
47830 err = NULL;
47831 d_set_d_op(dentry, &proc_sys_dentry_operations);
47832 +
47833 + gr_handle_proc_create(dentry, inode);
47834 +
47835 d_add(dentry, inode);
47836
47837 + if (gr_handle_sysctl(p, MAY_EXEC))
47838 + err = ERR_PTR(-ENOENT);
47839 +
47840 out:
47841 sysctl_head_finish(head);
47842 return err;
47843 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
47844 if (!table->proc_handler)
47845 goto out;
47846
47847 +#ifdef CONFIG_GRKERNSEC
47848 + error = -EPERM;
47849 + if (write && !capable(CAP_SYS_ADMIN))
47850 + goto out;
47851 +#endif
47852 +
47853 /* careful: calling conventions are nasty here */
47854 res = count;
47855 error = table->proc_handler(table, write, buf, &res, ppos);
47856 @@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
47857 return -ENOMEM;
47858 } else {
47859 d_set_d_op(child, &proc_sys_dentry_operations);
47860 +
47861 + gr_handle_proc_create(child, inode);
47862 +
47863 d_add(child, inode);
47864 }
47865 } else {
47866 @@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
47867 if (*pos < file->f_pos)
47868 continue;
47869
47870 + if (gr_handle_sysctl(table, 0))
47871 + continue;
47872 +
47873 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
47874 if (res)
47875 return res;
47876 @@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
47877 if (IS_ERR(head))
47878 return PTR_ERR(head);
47879
47880 + if (table && gr_handle_sysctl(table, MAY_EXEC))
47881 + return -ENOENT;
47882 +
47883 generic_fillattr(inode, stat);
47884 if (table)
47885 stat->mode = (stat->mode & S_IFMT) | table->mode;
47886 @@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
47887 .llseek = generic_file_llseek,
47888 };
47889
47890 -static const struct inode_operations proc_sys_inode_operations = {
47891 +const struct inode_operations proc_sys_inode_operations = {
47892 .permission = proc_sys_permission,
47893 .setattr = proc_sys_setattr,
47894 .getattr = proc_sys_getattr,
47895 };
47896
47897 -static const struct inode_operations proc_sys_dir_operations = {
47898 +const struct inode_operations proc_sys_dir_operations = {
47899 .lookup = proc_sys_lookup,
47900 .permission = proc_sys_permission,
47901 .setattr = proc_sys_setattr,
47902 diff --git a/fs/proc/root.c b/fs/proc/root.c
47903 index 03102d9..4ae347e 100644
47904 --- a/fs/proc/root.c
47905 +++ b/fs/proc/root.c
47906 @@ -121,7 +121,15 @@ void __init proc_root_init(void)
47907 #ifdef CONFIG_PROC_DEVICETREE
47908 proc_device_tree_init();
47909 #endif
47910 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
47911 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47912 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
47913 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47914 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
47915 +#endif
47916 +#else
47917 proc_mkdir("bus", NULL);
47918 +#endif
47919 proc_sys_init();
47920 }
47921
47922 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
47923 index 7dcd2a2..b2f410e 100644
47924 --- a/fs/proc/task_mmu.c
47925 +++ b/fs/proc/task_mmu.c
47926 @@ -11,6 +11,7 @@
47927 #include <linux/rmap.h>
47928 #include <linux/swap.h>
47929 #include <linux/swapops.h>
47930 +#include <linux/grsecurity.h>
47931
47932 #include <asm/elf.h>
47933 #include <asm/uaccess.h>
47934 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47935 "VmExe:\t%8lu kB\n"
47936 "VmLib:\t%8lu kB\n"
47937 "VmPTE:\t%8lu kB\n"
47938 - "VmSwap:\t%8lu kB\n",
47939 - hiwater_vm << (PAGE_SHIFT-10),
47940 + "VmSwap:\t%8lu kB\n"
47941 +
47942 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47943 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
47944 +#endif
47945 +
47946 + ,hiwater_vm << (PAGE_SHIFT-10),
47947 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
47948 mm->locked_vm << (PAGE_SHIFT-10),
47949 mm->pinned_vm << (PAGE_SHIFT-10),
47950 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
47951 data << (PAGE_SHIFT-10),
47952 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
47953 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
47954 - swap << (PAGE_SHIFT-10));
47955 + swap << (PAGE_SHIFT-10)
47956 +
47957 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47958 + , mm->context.user_cs_base, mm->context.user_cs_limit
47959 +#endif
47960 +
47961 + );
47962 }
47963
47964 unsigned long task_vsize(struct mm_struct *mm)
47965 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
47966 return ret;
47967 }
47968
47969 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47970 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47971 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47972 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47973 +#endif
47974 +
47975 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47976 {
47977 struct mm_struct *mm = vma->vm_mm;
47978 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47979 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
47980 }
47981
47982 - /* We don't show the stack guard page in /proc/maps */
47983 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47984 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
47985 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
47986 +#else
47987 start = vma->vm_start;
47988 - if (stack_guard_page_start(vma, start))
47989 - start += PAGE_SIZE;
47990 end = vma->vm_end;
47991 - if (stack_guard_page_end(vma, end))
47992 - end -= PAGE_SIZE;
47993 +#endif
47994
47995 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
47996 start,
47997 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
47998 flags & VM_WRITE ? 'w' : '-',
47999 flags & VM_EXEC ? 'x' : '-',
48000 flags & VM_MAYSHARE ? 's' : 'p',
48001 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48002 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48003 +#else
48004 pgoff,
48005 +#endif
48006 MAJOR(dev), MINOR(dev), ino, &len);
48007
48008 /*
48009 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48010 */
48011 if (file) {
48012 pad_len_spaces(m, len);
48013 - seq_path(m, &file->f_path, "\n");
48014 + seq_path(m, &file->f_path, "\n\\");
48015 } else {
48016 const char *name = arch_vma_name(vma);
48017 if (!name) {
48018 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48019 if (vma->vm_start <= mm->brk &&
48020 vma->vm_end >= mm->start_brk) {
48021 name = "[heap]";
48022 - } else if (vma->vm_start <= mm->start_stack &&
48023 - vma->vm_end >= mm->start_stack) {
48024 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48025 + (vma->vm_start <= mm->start_stack &&
48026 + vma->vm_end >= mm->start_stack)) {
48027 name = "[stack]";
48028 }
48029 } else {
48030 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
48031 struct proc_maps_private *priv = m->private;
48032 struct task_struct *task = priv->task;
48033
48034 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48035 + if (current->exec_id != m->exec_id) {
48036 + gr_log_badprocpid("maps");
48037 + return 0;
48038 + }
48039 +#endif
48040 +
48041 show_map_vma(m, vma);
48042
48043 if (m->count < m->size) /* vma is copied successfully */
48044 @@ -434,12 +464,23 @@ static int show_smap(struct seq_file *m, void *v)
48045 .private = &mss,
48046 };
48047
48048 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48049 + if (current->exec_id != m->exec_id) {
48050 + gr_log_badprocpid("smaps");
48051 + return 0;
48052 + }
48053 +#endif
48054 memset(&mss, 0, sizeof mss);
48055 - mss.vma = vma;
48056 - /* mmap_sem is held in m_start */
48057 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48058 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48059 -
48060 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48061 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48062 +#endif
48063 + mss.vma = vma;
48064 + /* mmap_sem is held in m_start */
48065 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48066 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48067 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48068 + }
48069 +#endif
48070 show_map_vma(m, vma);
48071
48072 seq_printf(m,
48073 @@ -457,7 +498,11 @@ static int show_smap(struct seq_file *m, void *v)
48074 "KernelPageSize: %8lu kB\n"
48075 "MMUPageSize: %8lu kB\n"
48076 "Locked: %8lu kB\n",
48077 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48078 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48079 +#else
48080 (vma->vm_end - vma->vm_start) >> 10,
48081 +#endif
48082 mss.resident >> 10,
48083 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48084 mss.shared_clean >> 10,
48085 @@ -1015,6 +1060,13 @@ static int show_numa_map(struct seq_file *m, void *v)
48086 int n;
48087 char buffer[50];
48088
48089 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48090 + if (current->exec_id != m->exec_id) {
48091 + gr_log_badprocpid("numa_maps");
48092 + return 0;
48093 + }
48094 +#endif
48095 +
48096 if (!mm)
48097 return 0;
48098
48099 @@ -1032,11 +1084,15 @@ static int show_numa_map(struct seq_file *m, void *v)
48100 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48101 mpol_cond_put(pol);
48102
48103 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48104 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48105 +#else
48106 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48107 +#endif
48108
48109 if (file) {
48110 seq_printf(m, " file=");
48111 - seq_path(m, &file->f_path, "\n\t= ");
48112 + seq_path(m, &file->f_path, "\n\t\\= ");
48113 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48114 seq_printf(m, " heap");
48115 } else if (vma->vm_start <= mm->start_stack &&
48116 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48117 index 980de54..2a4db5f 100644
48118 --- a/fs/proc/task_nommu.c
48119 +++ b/fs/proc/task_nommu.c
48120 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48121 else
48122 bytes += kobjsize(mm);
48123
48124 - if (current->fs && current->fs->users > 1)
48125 + if (current->fs && atomic_read(&current->fs->users) > 1)
48126 sbytes += kobjsize(current->fs);
48127 else
48128 bytes += kobjsize(current->fs);
48129 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
48130
48131 if (file) {
48132 pad_len_spaces(m, len);
48133 - seq_path(m, &file->f_path, "");
48134 + seq_path(m, &file->f_path, "\n\\");
48135 } else if (mm) {
48136 if (vma->vm_start <= mm->start_stack &&
48137 vma->vm_end >= mm->start_stack) {
48138 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48139 index d67908b..d13f6a6 100644
48140 --- a/fs/quota/netlink.c
48141 +++ b/fs/quota/netlink.c
48142 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48143 void quota_send_warning(short type, unsigned int id, dev_t dev,
48144 const char warntype)
48145 {
48146 - static atomic_t seq;
48147 + static atomic_unchecked_t seq;
48148 struct sk_buff *skb;
48149 void *msg_head;
48150 int ret;
48151 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48152 "VFS: Not enough memory to send quota warning.\n");
48153 return;
48154 }
48155 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48156 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48157 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48158 if (!msg_head) {
48159 printk(KERN_ERR
48160 diff --git a/fs/readdir.c b/fs/readdir.c
48161 index 356f715..c918d38 100644
48162 --- a/fs/readdir.c
48163 +++ b/fs/readdir.c
48164 @@ -17,6 +17,7 @@
48165 #include <linux/security.h>
48166 #include <linux/syscalls.h>
48167 #include <linux/unistd.h>
48168 +#include <linux/namei.h>
48169
48170 #include <asm/uaccess.h>
48171
48172 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48173
48174 struct readdir_callback {
48175 struct old_linux_dirent __user * dirent;
48176 + struct file * file;
48177 int result;
48178 };
48179
48180 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48181 buf->result = -EOVERFLOW;
48182 return -EOVERFLOW;
48183 }
48184 +
48185 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48186 + return 0;
48187 +
48188 buf->result++;
48189 dirent = buf->dirent;
48190 if (!access_ok(VERIFY_WRITE, dirent,
48191 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48192
48193 buf.result = 0;
48194 buf.dirent = dirent;
48195 + buf.file = file;
48196
48197 error = vfs_readdir(file, fillonedir, &buf);
48198 if (buf.result)
48199 @@ -142,6 +149,7 @@ struct linux_dirent {
48200 struct getdents_callback {
48201 struct linux_dirent __user * current_dir;
48202 struct linux_dirent __user * previous;
48203 + struct file * file;
48204 int count;
48205 int error;
48206 };
48207 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48208 buf->error = -EOVERFLOW;
48209 return -EOVERFLOW;
48210 }
48211 +
48212 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48213 + return 0;
48214 +
48215 dirent = buf->previous;
48216 if (dirent) {
48217 if (__put_user(offset, &dirent->d_off))
48218 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48219 buf.previous = NULL;
48220 buf.count = count;
48221 buf.error = 0;
48222 + buf.file = file;
48223
48224 error = vfs_readdir(file, filldir, &buf);
48225 if (error >= 0)
48226 @@ -229,6 +242,7 @@ out:
48227 struct getdents_callback64 {
48228 struct linux_dirent64 __user * current_dir;
48229 struct linux_dirent64 __user * previous;
48230 + struct file *file;
48231 int count;
48232 int error;
48233 };
48234 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48235 buf->error = -EINVAL; /* only used if we fail.. */
48236 if (reclen > buf->count)
48237 return -EINVAL;
48238 +
48239 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48240 + return 0;
48241 +
48242 dirent = buf->previous;
48243 if (dirent) {
48244 if (__put_user(offset, &dirent->d_off))
48245 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48246
48247 buf.current_dir = dirent;
48248 buf.previous = NULL;
48249 + buf.file = file;
48250 buf.count = count;
48251 buf.error = 0;
48252
48253 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48254 error = buf.error;
48255 lastdirent = buf.previous;
48256 if (lastdirent) {
48257 - typeof(lastdirent->d_off) d_off = file->f_pos;
48258 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48259 if (__put_user(d_off, &lastdirent->d_off))
48260 error = -EFAULT;
48261 else
48262 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48263 index 60c0804..d814f98 100644
48264 --- a/fs/reiserfs/do_balan.c
48265 +++ b/fs/reiserfs/do_balan.c
48266 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48267 return;
48268 }
48269
48270 - atomic_inc(&(fs_generation(tb->tb_sb)));
48271 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48272 do_balance_starts(tb);
48273
48274 /* balance leaf returns 0 except if combining L R and S into
48275 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48276 index 7a99811..a7c96c4 100644
48277 --- a/fs/reiserfs/procfs.c
48278 +++ b/fs/reiserfs/procfs.c
48279 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48280 "SMALL_TAILS " : "NO_TAILS ",
48281 replay_only(sb) ? "REPLAY_ONLY " : "",
48282 convert_reiserfs(sb) ? "CONV " : "",
48283 - atomic_read(&r->s_generation_counter),
48284 + atomic_read_unchecked(&r->s_generation_counter),
48285 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48286 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48287 SF(s_good_search_by_key_reada), SF(s_bmaps),
48288 diff --git a/fs/select.c b/fs/select.c
48289 index d33418f..2a5345e 100644
48290 --- a/fs/select.c
48291 +++ b/fs/select.c
48292 @@ -20,6 +20,7 @@
48293 #include <linux/module.h>
48294 #include <linux/slab.h>
48295 #include <linux/poll.h>
48296 +#include <linux/security.h>
48297 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48298 #include <linux/file.h>
48299 #include <linux/fdtable.h>
48300 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48301 struct poll_list *walk = head;
48302 unsigned long todo = nfds;
48303
48304 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48305 if (nfds > rlimit(RLIMIT_NOFILE))
48306 return -EINVAL;
48307
48308 diff --git a/fs/seq_file.c b/fs/seq_file.c
48309 index dba43c3..9fb8511 100644
48310 --- a/fs/seq_file.c
48311 +++ b/fs/seq_file.c
48312 @@ -9,6 +9,7 @@
48313 #include <linux/module.h>
48314 #include <linux/seq_file.h>
48315 #include <linux/slab.h>
48316 +#include <linux/sched.h>
48317
48318 #include <asm/uaccess.h>
48319 #include <asm/page.h>
48320 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48321 memset(p, 0, sizeof(*p));
48322 mutex_init(&p->lock);
48323 p->op = op;
48324 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48325 + p->exec_id = current->exec_id;
48326 +#endif
48327
48328 /*
48329 * Wrappers around seq_open(e.g. swaps_open) need to be
48330 @@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
48331 return 0;
48332 }
48333 if (!m->buf) {
48334 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48335 + m->size = PAGE_SIZE;
48336 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48337 if (!m->buf)
48338 return -ENOMEM;
48339 }
48340 @@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
48341 Eoverflow:
48342 m->op->stop(m, p);
48343 kfree(m->buf);
48344 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48345 + m->size <<= 1;
48346 + m->buf = kmalloc(m->size, GFP_KERNEL);
48347 return !m->buf ? -ENOMEM : -EAGAIN;
48348 }
48349
48350 @@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48351 m->version = file->f_version;
48352 /* grab buffer if we didn't have one */
48353 if (!m->buf) {
48354 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48355 + m->size = PAGE_SIZE;
48356 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48357 if (!m->buf)
48358 goto Enomem;
48359 }
48360 @@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48361 goto Fill;
48362 m->op->stop(m, p);
48363 kfree(m->buf);
48364 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48365 + m->size <<= 1;
48366 + m->buf = kmalloc(m->size, GFP_KERNEL);
48367 if (!m->buf)
48368 goto Enomem;
48369 m->count = 0;
48370 @@ -549,7 +557,7 @@ static void single_stop(struct seq_file *p, void *v)
48371 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48372 void *data)
48373 {
48374 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48375 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48376 int res = -ENOMEM;
48377
48378 if (op) {
48379 diff --git a/fs/splice.c b/fs/splice.c
48380 index fa2defa..8601650 100644
48381 --- a/fs/splice.c
48382 +++ b/fs/splice.c
48383 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48384 pipe_lock(pipe);
48385
48386 for (;;) {
48387 - if (!pipe->readers) {
48388 + if (!atomic_read(&pipe->readers)) {
48389 send_sig(SIGPIPE, current, 0);
48390 if (!ret)
48391 ret = -EPIPE;
48392 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48393 do_wakeup = 0;
48394 }
48395
48396 - pipe->waiting_writers++;
48397 + atomic_inc(&pipe->waiting_writers);
48398 pipe_wait(pipe);
48399 - pipe->waiting_writers--;
48400 + atomic_dec(&pipe->waiting_writers);
48401 }
48402
48403 pipe_unlock(pipe);
48404 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
48405 old_fs = get_fs();
48406 set_fs(get_ds());
48407 /* The cast to a user pointer is valid due to the set_fs() */
48408 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
48409 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
48410 set_fs(old_fs);
48411
48412 return res;
48413 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
48414 old_fs = get_fs();
48415 set_fs(get_ds());
48416 /* The cast to a user pointer is valid due to the set_fs() */
48417 - res = vfs_write(file, (const char __user *)buf, count, &pos);
48418 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
48419 set_fs(old_fs);
48420
48421 return res;
48422 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
48423 goto err;
48424
48425 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
48426 - vec[i].iov_base = (void __user *) page_address(page);
48427 + vec[i].iov_base = (void __force_user *) page_address(page);
48428 vec[i].iov_len = this_len;
48429 spd.pages[i] = page;
48430 spd.nr_pages++;
48431 @@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
48432 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
48433 {
48434 while (!pipe->nrbufs) {
48435 - if (!pipe->writers)
48436 + if (!atomic_read(&pipe->writers))
48437 return 0;
48438
48439 - if (!pipe->waiting_writers && sd->num_spliced)
48440 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
48441 return 0;
48442
48443 if (sd->flags & SPLICE_F_NONBLOCK)
48444 @@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
48445 * out of the pipe right after the splice_to_pipe(). So set
48446 * PIPE_READERS appropriately.
48447 */
48448 - pipe->readers = 1;
48449 + atomic_set(&pipe->readers, 1);
48450
48451 current->splice_pipe = pipe;
48452 }
48453 @@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48454 ret = -ERESTARTSYS;
48455 break;
48456 }
48457 - if (!pipe->writers)
48458 + if (!atomic_read(&pipe->writers))
48459 break;
48460 - if (!pipe->waiting_writers) {
48461 + if (!atomic_read(&pipe->waiting_writers)) {
48462 if (flags & SPLICE_F_NONBLOCK) {
48463 ret = -EAGAIN;
48464 break;
48465 @@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48466 pipe_lock(pipe);
48467
48468 while (pipe->nrbufs >= pipe->buffers) {
48469 - if (!pipe->readers) {
48470 + if (!atomic_read(&pipe->readers)) {
48471 send_sig(SIGPIPE, current, 0);
48472 ret = -EPIPE;
48473 break;
48474 @@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
48475 ret = -ERESTARTSYS;
48476 break;
48477 }
48478 - pipe->waiting_writers++;
48479 + atomic_inc(&pipe->waiting_writers);
48480 pipe_wait(pipe);
48481 - pipe->waiting_writers--;
48482 + atomic_dec(&pipe->waiting_writers);
48483 }
48484
48485 pipe_unlock(pipe);
48486 @@ -1819,14 +1819,14 @@ retry:
48487 pipe_double_lock(ipipe, opipe);
48488
48489 do {
48490 - if (!opipe->readers) {
48491 + if (!atomic_read(&opipe->readers)) {
48492 send_sig(SIGPIPE, current, 0);
48493 if (!ret)
48494 ret = -EPIPE;
48495 break;
48496 }
48497
48498 - if (!ipipe->nrbufs && !ipipe->writers)
48499 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
48500 break;
48501
48502 /*
48503 @@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48504 pipe_double_lock(ipipe, opipe);
48505
48506 do {
48507 - if (!opipe->readers) {
48508 + if (!atomic_read(&opipe->readers)) {
48509 send_sig(SIGPIPE, current, 0);
48510 if (!ret)
48511 ret = -EPIPE;
48512 @@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
48513 * return EAGAIN if we have the potential of some data in the
48514 * future, otherwise just return 0
48515 */
48516 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
48517 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
48518 ret = -EAGAIN;
48519
48520 pipe_unlock(ipipe);
48521 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
48522 index 7fdf6a7..e6cd8ad 100644
48523 --- a/fs/sysfs/dir.c
48524 +++ b/fs/sysfs/dir.c
48525 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
48526 struct sysfs_dirent *sd;
48527 int rc;
48528
48529 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48530 + const char *parent_name = parent_sd->s_name;
48531 +
48532 + mode = S_IFDIR | S_IRWXU;
48533 +
48534 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
48535 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
48536 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
48537 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
48538 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
48539 +#endif
48540 +
48541 /* allocate */
48542 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
48543 if (!sd)
48544 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
48545 index 779789a..f58193c 100644
48546 --- a/fs/sysfs/file.c
48547 +++ b/fs/sysfs/file.c
48548 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
48549
48550 struct sysfs_open_dirent {
48551 atomic_t refcnt;
48552 - atomic_t event;
48553 + atomic_unchecked_t event;
48554 wait_queue_head_t poll;
48555 struct list_head buffers; /* goes through sysfs_buffer.list */
48556 };
48557 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
48558 if (!sysfs_get_active(attr_sd))
48559 return -ENODEV;
48560
48561 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
48562 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
48563 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
48564
48565 sysfs_put_active(attr_sd);
48566 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
48567 return -ENOMEM;
48568
48569 atomic_set(&new_od->refcnt, 0);
48570 - atomic_set(&new_od->event, 1);
48571 + atomic_set_unchecked(&new_od->event, 1);
48572 init_waitqueue_head(&new_od->poll);
48573 INIT_LIST_HEAD(&new_od->buffers);
48574 goto retry;
48575 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
48576
48577 sysfs_put_active(attr_sd);
48578
48579 - if (buffer->event != atomic_read(&od->event))
48580 + if (buffer->event != atomic_read_unchecked(&od->event))
48581 goto trigger;
48582
48583 return DEFAULT_POLLMASK;
48584 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
48585
48586 od = sd->s_attr.open;
48587 if (od) {
48588 - atomic_inc(&od->event);
48589 + atomic_inc_unchecked(&od->event);
48590 wake_up_interruptible(&od->poll);
48591 }
48592
48593 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
48594 index a7ac78f..02158e1 100644
48595 --- a/fs/sysfs/symlink.c
48596 +++ b/fs/sysfs/symlink.c
48597 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48598
48599 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
48600 {
48601 - char *page = nd_get_link(nd);
48602 + const char *page = nd_get_link(nd);
48603 if (!IS_ERR(page))
48604 free_page((unsigned long)page);
48605 }
48606 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
48607 index c175b4d..8f36a16 100644
48608 --- a/fs/udf/misc.c
48609 +++ b/fs/udf/misc.c
48610 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
48611
48612 u8 udf_tag_checksum(const struct tag *t)
48613 {
48614 - u8 *data = (u8 *)t;
48615 + const u8 *data = (const u8 *)t;
48616 u8 checksum = 0;
48617 int i;
48618 for (i = 0; i < sizeof(struct tag); ++i)
48619 diff --git a/fs/utimes.c b/fs/utimes.c
48620 index ba653f3..06ea4b1 100644
48621 --- a/fs/utimes.c
48622 +++ b/fs/utimes.c
48623 @@ -1,6 +1,7 @@
48624 #include <linux/compiler.h>
48625 #include <linux/file.h>
48626 #include <linux/fs.h>
48627 +#include <linux/security.h>
48628 #include <linux/linkage.h>
48629 #include <linux/mount.h>
48630 #include <linux/namei.h>
48631 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
48632 goto mnt_drop_write_and_out;
48633 }
48634 }
48635 +
48636 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
48637 + error = -EACCES;
48638 + goto mnt_drop_write_and_out;
48639 + }
48640 +
48641 mutex_lock(&inode->i_mutex);
48642 error = notify_change(path->dentry, &newattrs);
48643 mutex_unlock(&inode->i_mutex);
48644 diff --git a/fs/xattr.c b/fs/xattr.c
48645 index 67583de..c5aad14 100644
48646 --- a/fs/xattr.c
48647 +++ b/fs/xattr.c
48648 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
48649 * Extended attribute SET operations
48650 */
48651 static long
48652 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
48653 +setxattr(struct path *path, const char __user *name, const void __user *value,
48654 size_t size, int flags)
48655 {
48656 int error;
48657 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
48658 return PTR_ERR(kvalue);
48659 }
48660
48661 - error = vfs_setxattr(d, kname, kvalue, size, flags);
48662 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
48663 + error = -EACCES;
48664 + goto out;
48665 + }
48666 +
48667 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
48668 +out:
48669 kfree(kvalue);
48670 return error;
48671 }
48672 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
48673 return error;
48674 error = mnt_want_write(path.mnt);
48675 if (!error) {
48676 - error = setxattr(path.dentry, name, value, size, flags);
48677 + error = setxattr(&path, name, value, size, flags);
48678 mnt_drop_write(path.mnt);
48679 }
48680 path_put(&path);
48681 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
48682 return error;
48683 error = mnt_want_write(path.mnt);
48684 if (!error) {
48685 - error = setxattr(path.dentry, name, value, size, flags);
48686 + error = setxattr(&path, name, value, size, flags);
48687 mnt_drop_write(path.mnt);
48688 }
48689 path_put(&path);
48690 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
48691 const void __user *,value, size_t, size, int, flags)
48692 {
48693 struct file *f;
48694 - struct dentry *dentry;
48695 int error = -EBADF;
48696
48697 f = fget(fd);
48698 if (!f)
48699 return error;
48700 - dentry = f->f_path.dentry;
48701 - audit_inode(NULL, dentry);
48702 + audit_inode(NULL, f->f_path.dentry);
48703 error = mnt_want_write_file(f);
48704 if (!error) {
48705 - error = setxattr(dentry, name, value, size, flags);
48706 + error = setxattr(&f->f_path, name, value, size, flags);
48707 mnt_drop_write(f->f_path.mnt);
48708 }
48709 fput(f);
48710 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
48711 index 8d5a506..7f62712 100644
48712 --- a/fs/xattr_acl.c
48713 +++ b/fs/xattr_acl.c
48714 @@ -17,8 +17,8 @@
48715 struct posix_acl *
48716 posix_acl_from_xattr(const void *value, size_t size)
48717 {
48718 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
48719 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
48720 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
48721 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
48722 int count;
48723 struct posix_acl *acl;
48724 struct posix_acl_entry *acl_e;
48725 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
48726 index d0ab788..827999b 100644
48727 --- a/fs/xfs/xfs_bmap.c
48728 +++ b/fs/xfs/xfs_bmap.c
48729 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
48730 int nmap,
48731 int ret_nmap);
48732 #else
48733 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
48734 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
48735 #endif /* DEBUG */
48736
48737 STATIC int
48738 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
48739 index 79d05e8..e3e5861 100644
48740 --- a/fs/xfs/xfs_dir2_sf.c
48741 +++ b/fs/xfs/xfs_dir2_sf.c
48742 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
48743 }
48744
48745 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
48746 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48747 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
48748 + char name[sfep->namelen];
48749 + memcpy(name, sfep->name, sfep->namelen);
48750 + if (filldir(dirent, name, sfep->namelen,
48751 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
48752 + *offset = off & 0x7fffffff;
48753 + return 0;
48754 + }
48755 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
48756 off & 0x7fffffff, ino, DT_UNKNOWN)) {
48757 *offset = off & 0x7fffffff;
48758 return 0;
48759 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
48760 index d99a905..9f88202 100644
48761 --- a/fs/xfs/xfs_ioctl.c
48762 +++ b/fs/xfs/xfs_ioctl.c
48763 @@ -128,7 +128,7 @@ xfs_find_handle(
48764 }
48765
48766 error = -EFAULT;
48767 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
48768 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
48769 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
48770 goto out_put;
48771
48772 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
48773 index 23ce927..e274cc1 100644
48774 --- a/fs/xfs/xfs_iops.c
48775 +++ b/fs/xfs/xfs_iops.c
48776 @@ -447,7 +447,7 @@ xfs_vn_put_link(
48777 struct nameidata *nd,
48778 void *p)
48779 {
48780 - char *s = nd_get_link(nd);
48781 + const char *s = nd_get_link(nd);
48782
48783 if (!IS_ERR(s))
48784 kfree(s);
48785 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
48786 new file mode 100644
48787 index 0000000..4089e05
48788 --- /dev/null
48789 +++ b/grsecurity/Kconfig
48790 @@ -0,0 +1,1078 @@
48791 +#
48792 +# grecurity configuration
48793 +#
48794 +
48795 +menu "Grsecurity"
48796 +
48797 +config GRKERNSEC
48798 + bool "Grsecurity"
48799 + select CRYPTO
48800 + select CRYPTO_SHA256
48801 + help
48802 + If you say Y here, you will be able to configure many features
48803 + that will enhance the security of your system. It is highly
48804 + recommended that you say Y here and read through the help
48805 + for each option so that you fully understand the features and
48806 + can evaluate their usefulness for your machine.
48807 +
48808 +choice
48809 + prompt "Security Level"
48810 + depends on GRKERNSEC
48811 + default GRKERNSEC_CUSTOM
48812 +
48813 +config GRKERNSEC_LOW
48814 + bool "Low"
48815 + select GRKERNSEC_LINK
48816 + select GRKERNSEC_FIFO
48817 + select GRKERNSEC_RANDNET
48818 + select GRKERNSEC_DMESG
48819 + select GRKERNSEC_CHROOT
48820 + select GRKERNSEC_CHROOT_CHDIR
48821 +
48822 + help
48823 + If you choose this option, several of the grsecurity options will
48824 + be enabled that will give you greater protection against a number
48825 + of attacks, while assuring that none of your software will have any
48826 + conflicts with the additional security measures. If you run a lot
48827 + of unusual software, or you are having problems with the higher
48828 + security levels, you should say Y here. With this option, the
48829 + following features are enabled:
48830 +
48831 + - Linking restrictions
48832 + - FIFO restrictions
48833 + - Restricted dmesg
48834 + - Enforced chdir("/") on chroot
48835 + - Runtime module disabling
48836 +
48837 +config GRKERNSEC_MEDIUM
48838 + bool "Medium"
48839 + select PAX
48840 + select PAX_EI_PAX
48841 + select PAX_PT_PAX_FLAGS
48842 + select PAX_HAVE_ACL_FLAGS
48843 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48844 + select GRKERNSEC_CHROOT
48845 + select GRKERNSEC_CHROOT_SYSCTL
48846 + select GRKERNSEC_LINK
48847 + select GRKERNSEC_FIFO
48848 + select GRKERNSEC_DMESG
48849 + select GRKERNSEC_RANDNET
48850 + select GRKERNSEC_FORKFAIL
48851 + select GRKERNSEC_TIME
48852 + select GRKERNSEC_SIGNAL
48853 + select GRKERNSEC_CHROOT
48854 + select GRKERNSEC_CHROOT_UNIX
48855 + select GRKERNSEC_CHROOT_MOUNT
48856 + select GRKERNSEC_CHROOT_PIVOT
48857 + select GRKERNSEC_CHROOT_DOUBLE
48858 + select GRKERNSEC_CHROOT_CHDIR
48859 + select GRKERNSEC_CHROOT_MKNOD
48860 + select GRKERNSEC_PROC
48861 + select GRKERNSEC_PROC_USERGROUP
48862 + select PAX_RANDUSTACK
48863 + select PAX_ASLR
48864 + select PAX_RANDMMAP
48865 + select PAX_REFCOUNT if (X86 || SPARC64)
48866 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
48867 +
48868 + help
48869 + If you say Y here, several features in addition to those included
48870 + in the low additional security level will be enabled. These
48871 + features provide even more security to your system, though in rare
48872 + cases they may be incompatible with very old or poorly written
48873 + software. If you enable this option, make sure that your auth
48874 + service (identd) is running as gid 1001. With this option,
48875 + the following features (in addition to those provided in the
48876 + low additional security level) will be enabled:
48877 +
48878 + - Failed fork logging
48879 + - Time change logging
48880 + - Signal logging
48881 + - Deny mounts in chroot
48882 + - Deny double chrooting
48883 + - Deny sysctl writes in chroot
48884 + - Deny mknod in chroot
48885 + - Deny access to abstract AF_UNIX sockets out of chroot
48886 + - Deny pivot_root in chroot
48887 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
48888 + - /proc restrictions with special GID set to 10 (usually wheel)
48889 + - Address Space Layout Randomization (ASLR)
48890 + - Prevent exploitation of most refcount overflows
48891 + - Bounds checking of copying between the kernel and userland
48892 +
48893 +config GRKERNSEC_HIGH
48894 + bool "High"
48895 + select GRKERNSEC_LINK
48896 + select GRKERNSEC_FIFO
48897 + select GRKERNSEC_DMESG
48898 + select GRKERNSEC_FORKFAIL
48899 + select GRKERNSEC_TIME
48900 + select GRKERNSEC_SIGNAL
48901 + select GRKERNSEC_CHROOT
48902 + select GRKERNSEC_CHROOT_SHMAT
48903 + select GRKERNSEC_CHROOT_UNIX
48904 + select GRKERNSEC_CHROOT_MOUNT
48905 + select GRKERNSEC_CHROOT_FCHDIR
48906 + select GRKERNSEC_CHROOT_PIVOT
48907 + select GRKERNSEC_CHROOT_DOUBLE
48908 + select GRKERNSEC_CHROOT_CHDIR
48909 + select GRKERNSEC_CHROOT_MKNOD
48910 + select GRKERNSEC_CHROOT_CAPS
48911 + select GRKERNSEC_CHROOT_SYSCTL
48912 + select GRKERNSEC_CHROOT_FINDTASK
48913 + select GRKERNSEC_SYSFS_RESTRICT
48914 + select GRKERNSEC_PROC
48915 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
48916 + select GRKERNSEC_HIDESYM
48917 + select GRKERNSEC_BRUTE
48918 + select GRKERNSEC_PROC_USERGROUP
48919 + select GRKERNSEC_KMEM
48920 + select GRKERNSEC_RESLOG
48921 + select GRKERNSEC_RANDNET
48922 + select GRKERNSEC_PROC_ADD
48923 + select GRKERNSEC_CHROOT_CHMOD
48924 + select GRKERNSEC_CHROOT_NICE
48925 + select GRKERNSEC_SETXID
48926 + select GRKERNSEC_AUDIT_MOUNT
48927 + select GRKERNSEC_MODHARDEN if (MODULES)
48928 + select GRKERNSEC_HARDEN_PTRACE
48929 + select GRKERNSEC_PTRACE_READEXEC
48930 + select GRKERNSEC_VM86 if (X86_32)
48931 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
48932 + select PAX
48933 + select PAX_RANDUSTACK
48934 + select PAX_ASLR
48935 + select PAX_RANDMMAP
48936 + select PAX_NOEXEC
48937 + select PAX_MPROTECT
48938 + select PAX_EI_PAX
48939 + select PAX_PT_PAX_FLAGS
48940 + select PAX_HAVE_ACL_FLAGS
48941 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
48942 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
48943 + select PAX_RANDKSTACK if (X86_TSC && X86)
48944 + select PAX_SEGMEXEC if (X86_32)
48945 + select PAX_PAGEEXEC
48946 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
48947 + select PAX_EMUTRAMP if (PARISC)
48948 + select PAX_EMUSIGRT if (PARISC)
48949 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
48950 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
48951 + select PAX_REFCOUNT if (X86 || SPARC64)
48952 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
48953 + help
48954 + If you say Y here, many of the features of grsecurity will be
48955 + enabled, which will protect you against many kinds of attacks
48956 + against your system. The heightened security comes at a cost
48957 + of an increased chance of incompatibilities with rare software
48958 + on your machine. Since this security level enables PaX, you should
48959 + view <http://pax.grsecurity.net> and read about the PaX
48960 + project. While you are there, download chpax and run it on
48961 + binaries that cause problems with PaX. Also remember that
48962 + since the /proc restrictions are enabled, you must run your
48963 + identd as gid 1001. This security level enables the following
48964 + features in addition to those listed in the low and medium
48965 + security levels:
48966 +
48967 + - Additional /proc restrictions
48968 + - Chmod restrictions in chroot
48969 + - No signals, ptrace, or viewing of processes outside of chroot
48970 + - Capability restrictions in chroot
48971 + - Deny fchdir out of chroot
48972 + - Priority restrictions in chroot
48973 + - Segmentation-based implementation of PaX
48974 + - Mprotect restrictions
48975 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
48976 + - Kernel stack randomization
48977 + - Mount/unmount/remount logging
48978 + - Kernel symbol hiding
48979 + - Hardening of module auto-loading
48980 + - Ptrace restrictions
48981 + - Restricted vm86 mode
48982 + - Restricted sysfs/debugfs
48983 + - Active kernel exploit response
48984 +
48985 +config GRKERNSEC_CUSTOM
48986 + bool "Custom"
48987 + help
48988 + If you say Y here, you will be able to configure every grsecurity
48989 + option, which allows you to enable many more features that aren't
48990 + covered in the basic security levels. These additional features
48991 + include TPE, socket restrictions, and the sysctl system for
48992 + grsecurity. It is advised that you read through the help for
48993 + each option to determine its usefulness in your situation.
48994 +
48995 +endchoice
48996 +
48997 +menu "Memory Protections"
48998 +depends on GRKERNSEC
48999 +
49000 +config GRKERNSEC_KMEM
49001 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49002 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49003 + help
49004 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49005 + be written to or read from to modify or leak the contents of the running
49006 + kernel. /dev/port will also not be allowed to be opened. If you have module
49007 + support disabled, enabling this will close up four ways that are
49008 + currently used to insert malicious code into the running kernel.
49009 + Even with all these features enabled, we still highly recommend that
49010 + you use the RBAC system, as it is still possible for an attacker to
49011 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49012 + If you are not using XFree86, you may be able to stop this additional
49013 + case by enabling the 'Disable privileged I/O' option. Though nothing
49014 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49015 + but only to video memory, which is the only writing we allow in this
49016 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49017 + not be allowed to mprotect it with PROT_WRITE later.
49018 + It is highly recommended that you say Y here if you meet all the
49019 + conditions above.
49020 +
49021 +config GRKERNSEC_VM86
49022 + bool "Restrict VM86 mode"
49023 + depends on X86_32
49024 +
49025 + help
49026 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49027 + make use of a special execution mode on 32bit x86 processors called
49028 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49029 + video cards and will still work with this option enabled. The purpose
49030 + of the option is to prevent exploitation of emulation errors in
49031 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49032 + Nearly all users should be able to enable this option.
49033 +
49034 +config GRKERNSEC_IO
49035 + bool "Disable privileged I/O"
49036 + depends on X86
49037 + select RTC_CLASS
49038 + select RTC_INTF_DEV
49039 + select RTC_DRV_CMOS
49040 +
49041 + help
49042 + If you say Y here, all ioperm and iopl calls will return an error.
49043 + Ioperm and iopl can be used to modify the running kernel.
49044 + Unfortunately, some programs need this access to operate properly,
49045 + the most notable of which are XFree86 and hwclock. hwclock can be
49046 + remedied by having RTC support in the kernel, so real-time
49047 + clock support is enabled if this option is enabled, to ensure
49048 + that hwclock operates correctly. XFree86 still will not
49049 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49050 + IF YOU USE XFree86. If you use XFree86 and you still want to
49051 + protect your kernel against modification, use the RBAC system.
49052 +
49053 +config GRKERNSEC_PROC_MEMMAP
49054 + bool "Harden ASLR against information leaks and entropy reduction"
49055 + default y if (PAX_NOEXEC || PAX_ASLR)
49056 + depends on PAX_NOEXEC || PAX_ASLR
49057 + help
49058 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49059 + give no information about the addresses of its mappings if
49060 + PaX features that rely on random addresses are enabled on the task.
49061 + In addition to sanitizing this information and disabling other
49062 + dangerous sources of information, this option causes reads of sensitive
49063 + /proc/<pid> entries where the file descriptor was opened in a different
49064 + task than the one performing the read. Such attempts are logged.
49065 + This option also limits argv/env strings for suid/sgid binaries
49066 + to 512KB to prevent a complete exhaustion of the stack entropy provided
49067 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49068 + binaries to prevent alternative mmap layouts from being abused.
49069 +
49070 + If you use PaX it is essential that you say Y here as it closes up
49071 + several holes that make full ASLR useless locally.
49072 +
49073 +config GRKERNSEC_BRUTE
49074 + bool "Deter exploit bruteforcing"
49075 + help
49076 + If you say Y here, attempts to bruteforce exploits against forking
49077 + daemons such as apache or sshd, as well as against suid/sgid binaries
49078 + will be deterred. When a child of a forking daemon is killed by PaX
49079 + or crashes due to an illegal instruction or other suspicious signal,
49080 + the parent process will be delayed 30 seconds upon every subsequent
49081 + fork until the administrator is able to assess the situation and
49082 + restart the daemon.
49083 + In the suid/sgid case, the attempt is logged, the user has all their
49084 + processes terminated, and they are prevented from executing any further
49085 + processes for 15 minutes.
49086 + It is recommended that you also enable signal logging in the auditing
49087 + section so that logs are generated when a process triggers a suspicious
49088 + signal.
49089 + If the sysctl option is enabled, a sysctl option with name
49090 + "deter_bruteforce" is created.
49091 +
49092 +
49093 +config GRKERNSEC_MODHARDEN
49094 + bool "Harden module auto-loading"
49095 + depends on MODULES
49096 + help
49097 + If you say Y here, module auto-loading in response to use of some
49098 + feature implemented by an unloaded module will be restricted to
49099 + root users. Enabling this option helps defend against attacks
49100 + by unprivileged users who abuse the auto-loading behavior to
49101 + cause a vulnerable module to load that is then exploited.
49102 +
49103 + If this option prevents a legitimate use of auto-loading for a
49104 + non-root user, the administrator can execute modprobe manually
49105 + with the exact name of the module mentioned in the alert log.
49106 + Alternatively, the administrator can add the module to the list
49107 + of modules loaded at boot by modifying init scripts.
49108 +
49109 + Modification of init scripts will most likely be needed on
49110 + Ubuntu servers with encrypted home directory support enabled,
49111 + as the first non-root user logging in will cause the ecb(aes),
49112 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49113 +
49114 +config GRKERNSEC_HIDESYM
49115 + bool "Hide kernel symbols"
49116 + help
49117 + If you say Y here, getting information on loaded modules, and
49118 + displaying all kernel symbols through a syscall will be restricted
49119 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49120 + /proc/kallsyms will be restricted to the root user. The RBAC
49121 + system can hide that entry even from root.
49122 +
49123 + This option also prevents leaking of kernel addresses through
49124 + several /proc entries.
49125 +
49126 + Note that this option is only effective provided the following
49127 + conditions are met:
49128 + 1) The kernel using grsecurity is not precompiled by some distribution
49129 + 2) You have also enabled GRKERNSEC_DMESG
49130 + 3) You are using the RBAC system and hiding other files such as your
49131 + kernel image and System.map. Alternatively, enabling this option
49132 + causes the permissions on /boot, /lib/modules, and the kernel
49133 + source directory to change at compile time to prevent
49134 + reading by non-root users.
49135 + If the above conditions are met, this option will aid in providing a
49136 + useful protection against local kernel exploitation of overflows
49137 + and arbitrary read/write vulnerabilities.
49138 +
49139 +config GRKERNSEC_KERN_LOCKOUT
49140 + bool "Active kernel exploit response"
49141 + depends on X86 || ARM || PPC || SPARC
49142 + help
49143 + If you say Y here, when a PaX alert is triggered due to suspicious
49144 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49145 + or an OOPs occurs due to bad memory accesses, instead of just
49146 + terminating the offending process (and potentially allowing
49147 + a subsequent exploit from the same user), we will take one of two
49148 + actions:
49149 + If the user was root, we will panic the system
49150 + If the user was non-root, we will log the attempt, terminate
49151 + all processes owned by the user, then prevent them from creating
49152 + any new processes until the system is restarted
49153 + This deters repeated kernel exploitation/bruteforcing attempts
49154 + and is useful for later forensics.
49155 +
49156 +endmenu
49157 +menu "Role Based Access Control Options"
49158 +depends on GRKERNSEC
49159 +
49160 +config GRKERNSEC_RBAC_DEBUG
49161 + bool
49162 +
49163 +config GRKERNSEC_NO_RBAC
49164 + bool "Disable RBAC system"
49165 + help
49166 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49167 + preventing the RBAC system from being enabled. You should only say Y
49168 + here if you have no intention of using the RBAC system, so as to prevent
49169 + an attacker with root access from misusing the RBAC system to hide files
49170 + and processes when loadable module support and /dev/[k]mem have been
49171 + locked down.
49172 +
49173 +config GRKERNSEC_ACL_HIDEKERN
49174 + bool "Hide kernel processes"
49175 + help
49176 + If you say Y here, all kernel threads will be hidden to all
49177 + processes but those whose subject has the "view hidden processes"
49178 + flag.
49179 +
49180 +config GRKERNSEC_ACL_MAXTRIES
49181 + int "Maximum tries before password lockout"
49182 + default 3
49183 + help
49184 + This option enforces the maximum number of times a user can attempt
49185 + to authorize themselves with the grsecurity RBAC system before being
49186 + denied the ability to attempt authorization again for a specified time.
49187 + The lower the number, the harder it will be to brute-force a password.
49188 +
49189 +config GRKERNSEC_ACL_TIMEOUT
49190 + int "Time to wait after max password tries, in seconds"
49191 + default 30
49192 + help
49193 + This option specifies the time the user must wait after attempting to
49194 + authorize to the RBAC system with the maximum number of invalid
49195 + passwords. The higher the number, the harder it will be to brute-force
49196 + a password.
49197 +
49198 +endmenu
49199 +menu "Filesystem Protections"
49200 +depends on GRKERNSEC
49201 +
49202 +config GRKERNSEC_PROC
49203 + bool "Proc restrictions"
49204 + help
49205 + If you say Y here, the permissions of the /proc filesystem
49206 + will be altered to enhance system security and privacy. You MUST
49207 + choose either a user only restriction or a user and group restriction.
49208 + Depending upon the option you choose, you can either restrict users to
49209 + see only the processes they themselves run, or choose a group that can
49210 + view all processes and files normally restricted to root if you choose
49211 + the "restrict to user only" option. NOTE: If you're running identd or
49212 + ntpd as a non-root user, you will have to run it as the group you
49213 + specify here.
49214 +
49215 +config GRKERNSEC_PROC_USER
49216 + bool "Restrict /proc to user only"
49217 + depends on GRKERNSEC_PROC
49218 + help
49219 + If you say Y here, non-root users will only be able to view their own
49220 + processes, and restricts them from viewing network-related information,
49221 + and viewing kernel symbol and module information.
49222 +
49223 +config GRKERNSEC_PROC_USERGROUP
49224 + bool "Allow special group"
49225 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49226 + help
49227 + If you say Y here, you will be able to select a group that will be
49228 + able to view all processes and network-related information. If you've
49229 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49230 + remain hidden. This option is useful if you want to run identd as
49231 + a non-root user.
49232 +
49233 +config GRKERNSEC_PROC_GID
49234 + int "GID for special group"
49235 + depends on GRKERNSEC_PROC_USERGROUP
49236 + default 1001
49237 +
49238 +config GRKERNSEC_PROC_ADD
49239 + bool "Additional restrictions"
49240 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49241 + help
49242 + If you say Y here, additional restrictions will be placed on
49243 + /proc that keep normal users from viewing device information and
49244 + slabinfo information that could be useful for exploits.
49245 +
49246 +config GRKERNSEC_LINK
49247 + bool "Linking restrictions"
49248 + help
49249 + If you say Y here, /tmp race exploits will be prevented, since users
49250 + will no longer be able to follow symlinks owned by other users in
49251 + world-writable +t directories (e.g. /tmp), unless the owner of the
49252 + symlink is the owner of the directory. users will also not be
49253 + able to hardlink to files they do not own. If the sysctl option is
49254 + enabled, a sysctl option with name "linking_restrictions" is created.
49255 +
49256 +config GRKERNSEC_FIFO
49257 + bool "FIFO restrictions"
49258 + help
49259 + If you say Y here, users will not be able to write to FIFOs they don't
49260 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49261 + the FIFO is the same owner of the directory it's held in. If the sysctl
49262 + option is enabled, a sysctl option with name "fifo_restrictions" is
49263 + created.
49264 +
49265 +config GRKERNSEC_SYSFS_RESTRICT
49266 + bool "Sysfs/debugfs restriction"
49267 + depends on SYSFS
49268 + help
49269 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49270 + any filesystem normally mounted under it (e.g. debugfs) will be
49271 + mostly accessible only by root. These filesystems generally provide access
49272 + to hardware and debug information that isn't appropriate for unprivileged
49273 + users of the system. Sysfs and debugfs have also become a large source
49274 + of new vulnerabilities, ranging from infoleaks to local compromise.
49275 + There has been very little oversight with an eye toward security involved
49276 + in adding new exporters of information to these filesystems, so their
49277 + use is discouraged.
49278 + For reasons of compatibility, a few directories have been whitelisted
49279 + for access by non-root users:
49280 + /sys/fs/selinux
49281 + /sys/fs/fuse
49282 + /sys/devices/system/cpu
49283 +
49284 +config GRKERNSEC_ROFS
49285 + bool "Runtime read-only mount protection"
49286 + help
49287 + If you say Y here, a sysctl option with name "romount_protect" will
49288 + be created. By setting this option to 1 at runtime, filesystems
49289 + will be protected in the following ways:
49290 + * No new writable mounts will be allowed
49291 + * Existing read-only mounts won't be able to be remounted read/write
49292 + * Write operations will be denied on all block devices
49293 + This option acts independently of grsec_lock: once it is set to 1,
49294 + it cannot be turned off. Therefore, please be mindful of the resulting
49295 + behavior if this option is enabled in an init script on a read-only
49296 + filesystem. This feature is mainly intended for secure embedded systems.
49297 +
49298 +config GRKERNSEC_CHROOT
49299 + bool "Chroot jail restrictions"
49300 + help
49301 + If you say Y here, you will be able to choose several options that will
49302 + make breaking out of a chrooted jail much more difficult. If you
49303 + encounter no software incompatibilities with the following options, it
49304 + is recommended that you enable each one.
49305 +
49306 +config GRKERNSEC_CHROOT_MOUNT
49307 + bool "Deny mounts"
49308 + depends on GRKERNSEC_CHROOT
49309 + help
49310 + If you say Y here, processes inside a chroot will not be able to
49311 + mount or remount filesystems. If the sysctl option is enabled, a
49312 + sysctl option with name "chroot_deny_mount" is created.
49313 +
49314 +config GRKERNSEC_CHROOT_DOUBLE
49315 + bool "Deny double-chroots"
49316 + depends on GRKERNSEC_CHROOT
49317 + help
49318 + If you say Y here, processes inside a chroot will not be able to chroot
49319 + again outside the chroot. This is a widely used method of breaking
49320 + out of a chroot jail and should not be allowed. If the sysctl
49321 + option is enabled, a sysctl option with name
49322 + "chroot_deny_chroot" is created.
49323 +
49324 +config GRKERNSEC_CHROOT_PIVOT
49325 + bool "Deny pivot_root in chroot"
49326 + depends on GRKERNSEC_CHROOT
49327 + help
49328 + If you say Y here, processes inside a chroot will not be able to use
49329 + a function called pivot_root() that was introduced in Linux 2.3.41. It
49330 + works similar to chroot in that it changes the root filesystem. This
49331 + function could be misused in a chrooted process to attempt to break out
49332 + of the chroot, and therefore should not be allowed. If the sysctl
49333 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
49334 + created.
49335 +
49336 +config GRKERNSEC_CHROOT_CHDIR
49337 + bool "Enforce chdir(\"/\") on all chroots"
49338 + depends on GRKERNSEC_CHROOT
49339 + help
49340 + If you say Y here, the current working directory of all newly-chrooted
49341 + applications will be set to the root directory of the chroot.
49342 + The man page on chroot(2) states:
49343 + Note that this call does not change the current working
49344 + directory, so that `.' can be outside the tree rooted at
49345 + `/'. In particular, the super-user can escape from a
49346 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49347 +
49348 + It is recommended that you say Y here, since it's not known to break
49349 + any software. If the sysctl option is enabled, a sysctl option with
49350 + name "chroot_enforce_chdir" is created.
49351 +
49352 +config GRKERNSEC_CHROOT_CHMOD
49353 + bool "Deny (f)chmod +s"
49354 + depends on GRKERNSEC_CHROOT
49355 + help
49356 + If you say Y here, processes inside a chroot will not be able to chmod
49357 + or fchmod files to make them have suid or sgid bits. This protects
49358 + against another published method of breaking a chroot. If the sysctl
49359 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49360 + created.
49361 +
49362 +config GRKERNSEC_CHROOT_FCHDIR
49363 + bool "Deny fchdir out of chroot"
49364 + depends on GRKERNSEC_CHROOT
49365 + help
49366 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49367 + to a file descriptor of the chrooting process that points to a directory
49368 + outside the filesystem will be stopped. If the sysctl option
49369 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49370 +
49371 +config GRKERNSEC_CHROOT_MKNOD
49372 + bool "Deny mknod"
49373 + depends on GRKERNSEC_CHROOT
49374 + help
49375 + If you say Y here, processes inside a chroot will not be allowed to
49376 + mknod. The problem with using mknod inside a chroot is that it
49377 + would allow an attacker to create a device entry that is the same
49378 + as one on the physical root of your system, which could range from
49379 + anything from the console device to a device for your harddrive (which
49380 + they could then use to wipe the drive or steal data). It is recommended
49381 + that you say Y here, unless you run into software incompatibilities.
49382 + If the sysctl option is enabled, a sysctl option with name
49383 + "chroot_deny_mknod" is created.
49384 +
49385 +config GRKERNSEC_CHROOT_SHMAT
49386 + bool "Deny shmat() out of chroot"
49387 + depends on GRKERNSEC_CHROOT
49388 + help
49389 + If you say Y here, processes inside a chroot will not be able to attach
49390 + to shared memory segments that were created outside of the chroot jail.
49391 + It is recommended that you say Y here. If the sysctl option is enabled,
49392 + a sysctl option with name "chroot_deny_shmat" is created.
49393 +
49394 +config GRKERNSEC_CHROOT_UNIX
49395 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49396 + depends on GRKERNSEC_CHROOT
49397 + help
49398 + If you say Y here, processes inside a chroot will not be able to
49399 + connect to abstract (meaning not belonging to a filesystem) Unix
49400 + domain sockets that were bound outside of a chroot. It is recommended
49401 + that you say Y here. If the sysctl option is enabled, a sysctl option
49402 + with name "chroot_deny_unix" is created.
49403 +
49404 +config GRKERNSEC_CHROOT_FINDTASK
49405 + bool "Protect outside processes"
49406 + depends on GRKERNSEC_CHROOT
49407 + help
49408 + If you say Y here, processes inside a chroot will not be able to
49409 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49410 + getsid, or view any process outside of the chroot. If the sysctl
49411 + option is enabled, a sysctl option with name "chroot_findtask" is
49412 + created.
49413 +
49414 +config GRKERNSEC_CHROOT_NICE
49415 + bool "Restrict priority changes"
49416 + depends on GRKERNSEC_CHROOT
49417 + help
49418 + If you say Y here, processes inside a chroot will not be able to raise
49419 + the priority of processes in the chroot, or alter the priority of
49420 + processes outside the chroot. This provides more security than simply
49421 + removing CAP_SYS_NICE from the process' capability set. If the
49422 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49423 + is created.
49424 +
49425 +config GRKERNSEC_CHROOT_SYSCTL
49426 + bool "Deny sysctl writes"
49427 + depends on GRKERNSEC_CHROOT
49428 + help
49429 + If you say Y here, an attacker in a chroot will not be able to
49430 + write to sysctl entries, either by sysctl(2) or through a /proc
49431 + interface. It is strongly recommended that you say Y here. If the
49432 + sysctl option is enabled, a sysctl option with name
49433 + "chroot_deny_sysctl" is created.
49434 +
49435 +config GRKERNSEC_CHROOT_CAPS
49436 + bool "Capability restrictions"
49437 + depends on GRKERNSEC_CHROOT
49438 + help
49439 + If you say Y here, the capabilities on all processes within a
49440 + chroot jail will be lowered to stop module insertion, raw i/o,
49441 + system and net admin tasks, rebooting the system, modifying immutable
49442 + files, modifying IPC owned by another, and changing the system time.
49443 + This is left an option because it can break some apps. Disable this
49444 + if your chrooted apps are having problems performing those kinds of
49445 + tasks. If the sysctl option is enabled, a sysctl option with
49446 + name "chroot_caps" is created.
49447 +
49448 +endmenu
49449 +menu "Kernel Auditing"
49450 +depends on GRKERNSEC
49451 +
49452 +config GRKERNSEC_AUDIT_GROUP
49453 + bool "Single group for auditing"
49454 + help
49455 + If you say Y here, the exec, chdir, and (un)mount logging features
49456 + will only operate on a group you specify. This option is recommended
49457 + if you only want to watch certain users instead of having a large
49458 + amount of logs from the entire system. If the sysctl option is enabled,
49459 + a sysctl option with name "audit_group" is created.
49460 +
49461 +config GRKERNSEC_AUDIT_GID
49462 + int "GID for auditing"
49463 + depends on GRKERNSEC_AUDIT_GROUP
49464 + default 1007
49465 +
49466 +config GRKERNSEC_EXECLOG
49467 + bool "Exec logging"
49468 + help
49469 + If you say Y here, all execve() calls will be logged (since the
49470 + other exec*() calls are frontends to execve(), all execution
49471 + will be logged). Useful for shell-servers that like to keep track
49472 + of their users. If the sysctl option is enabled, a sysctl option with
49473 + name "exec_logging" is created.
49474 + WARNING: This option when enabled will produce a LOT of logs, especially
49475 + on an active system.
49476 +
49477 +config GRKERNSEC_RESLOG
49478 + bool "Resource logging"
49479 + help
49480 + If you say Y here, all attempts to overstep resource limits will
49481 + be logged with the resource name, the requested size, and the current
49482 + limit. It is highly recommended that you say Y here. If the sysctl
49483 + option is enabled, a sysctl option with name "resource_logging" is
49484 + created. If the RBAC system is enabled, the sysctl value is ignored.
49485 +
49486 +config GRKERNSEC_CHROOT_EXECLOG
49487 + bool "Log execs within chroot"
49488 + help
49489 + If you say Y here, all executions inside a chroot jail will be logged
49490 + to syslog. This can cause a large amount of logs if certain
49491 + applications (eg. djb's daemontools) are installed on the system, and
49492 + is therefore left as an option. If the sysctl option is enabled, a
49493 + sysctl option with name "chroot_execlog" is created.
49494 +
49495 +config GRKERNSEC_AUDIT_PTRACE
49496 + bool "Ptrace logging"
49497 + help
49498 + If you say Y here, all attempts to attach to a process via ptrace
49499 + will be logged. If the sysctl option is enabled, a sysctl option
49500 + with name "audit_ptrace" is created.
49501 +
49502 +config GRKERNSEC_AUDIT_CHDIR
49503 + bool "Chdir logging"
49504 + help
49505 + If you say Y here, all chdir() calls will be logged. If the sysctl
49506 + option is enabled, a sysctl option with name "audit_chdir" is created.
49507 +
49508 +config GRKERNSEC_AUDIT_MOUNT
49509 + bool "(Un)Mount logging"
49510 + help
49511 + If you say Y here, all mounts and unmounts will be logged. If the
49512 + sysctl option is enabled, a sysctl option with name "audit_mount" is
49513 + created.
49514 +
49515 +config GRKERNSEC_SIGNAL
49516 + bool "Signal logging"
49517 + help
49518 + If you say Y here, certain important signals will be logged, such as
49519 + SIGSEGV, which will as a result inform you of when an error in a program
49520 + occurred, which in some cases could mean a possible exploit attempt.
49521 + If the sysctl option is enabled, a sysctl option with name
49522 + "signal_logging" is created.
49523 +
49524 +config GRKERNSEC_FORKFAIL
49525 + bool "Fork failure logging"
49526 + help
49527 + If you say Y here, all failed fork() attempts will be logged.
49528 + This could suggest a fork bomb, or someone attempting to overstep
49529 + their process limit. If the sysctl option is enabled, a sysctl option
49530 + with name "forkfail_logging" is created.
49531 +
49532 +config GRKERNSEC_TIME
49533 + bool "Time change logging"
49534 + help
49535 + If you say Y here, any changes of the system clock will be logged.
49536 + If the sysctl option is enabled, a sysctl option with name
49537 + "timechange_logging" is created.
49538 +
49539 +config GRKERNSEC_PROC_IPADDR
49540 + bool "/proc/<pid>/ipaddr support"
49541 + help
49542 + If you say Y here, a new entry will be added to each /proc/<pid>
49543 + directory that contains the IP address of the person using the task.
49544 + The IP is carried across local TCP and AF_UNIX stream sockets.
49545 + This information can be useful for IDS/IPSes to perform remote response
49546 + to a local attack. The entry is readable by only the owner of the
49547 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
49548 + the RBAC system), and thus does not create privacy concerns.
49549 +
49550 +config GRKERNSEC_RWXMAP_LOG
49551 + bool 'Denied RWX mmap/mprotect logging'
49552 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
49553 + help
49554 + If you say Y here, calls to mmap() and mprotect() with explicit
49555 + usage of PROT_WRITE and PROT_EXEC together will be logged when
49556 + denied by the PAX_MPROTECT feature. If the sysctl option is
49557 + enabled, a sysctl option with name "rwxmap_logging" is created.
49558 +
49559 +config GRKERNSEC_AUDIT_TEXTREL
49560 + bool 'ELF text relocations logging (READ HELP)'
49561 + depends on PAX_MPROTECT
49562 + help
49563 + If you say Y here, text relocations will be logged with the filename
49564 + of the offending library or binary. The purpose of the feature is
49565 + to help Linux distribution developers get rid of libraries and
49566 + binaries that need text relocations which hinder the future progress
49567 + of PaX. Only Linux distribution developers should say Y here, and
49568 + never on a production machine, as this option creates an information
49569 + leak that could aid an attacker in defeating the randomization of
49570 + a single memory region. If the sysctl option is enabled, a sysctl
49571 + option with name "audit_textrel" is created.
49572 +
49573 +endmenu
49574 +
49575 +menu "Executable Protections"
49576 +depends on GRKERNSEC
49577 +
49578 +config GRKERNSEC_DMESG
49579 + bool "Dmesg(8) restriction"
49580 + help
49581 + If you say Y here, non-root users will not be able to use dmesg(8)
49582 + to view up to the last 4kb of messages in the kernel's log buffer.
49583 + The kernel's log buffer often contains kernel addresses and other
49584 + identifying information useful to an attacker in fingerprinting a
49585 + system for a targeted exploit.
49586 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
49587 + created.
49588 +
49589 +config GRKERNSEC_HARDEN_PTRACE
49590 + bool "Deter ptrace-based process snooping"
49591 + help
49592 + If you say Y here, TTY sniffers and other malicious monitoring
49593 + programs implemented through ptrace will be defeated. If you
49594 + have been using the RBAC system, this option has already been
49595 + enabled for several years for all users, with the ability to make
49596 + fine-grained exceptions.
49597 +
49598 + This option only affects the ability of non-root users to ptrace
49599 + processes that are not a descendent of the ptracing process.
49600 + This means that strace ./binary and gdb ./binary will still work,
49601 + but attaching to arbitrary processes will not. If the sysctl
49602 + option is enabled, a sysctl option with name "harden_ptrace" is
49603 + created.
49604 +
49605 +config GRKERNSEC_PTRACE_READEXEC
49606 + bool "Require read access to ptrace sensitive binaries"
49607 + help
49608 + If you say Y here, unprivileged users will not be able to ptrace unreadable
49609 + binaries. This option is useful in environments that
49610 + remove the read bits (e.g. file mode 4711) from suid binaries to
49611 + prevent infoleaking of their contents. This option adds
49612 + consistency to the use of that file mode, as the binary could normally
49613 + be read out when run without privileges while ptracing.
49614 +
49615 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
49616 + is created.
49617 +
49618 +config GRKERNSEC_SETXID
49619 + bool "Enforce consistent multithreaded privileges"
49620 + help
49621 + If you say Y here, a change from a root uid to a non-root uid
49622 + in a multithreaded application will cause the resulting uids,
49623 + gids, supplementary groups, and capabilities in that thread
49624 + to be propagated to the other threads of the process. In most
49625 + cases this is unnecessary, as glibc will emulate this behavior
49626 + on behalf of the application. Other libcs do not act in the
49627 + same way, allowing the other threads of the process to continue
49628 + running with root privileges. If the sysctl option is enabled,
49629 + a sysctl option with name "consistent_setxid" is created.
49630 +
49631 +config GRKERNSEC_TPE
49632 + bool "Trusted Path Execution (TPE)"
49633 + help
49634 + If you say Y here, you will be able to choose a gid to add to the
49635 + supplementary groups of users you want to mark as "untrusted."
49636 + These users will not be able to execute any files that are not in
49637 + root-owned directories writable only by root. If the sysctl option
49638 + is enabled, a sysctl option with name "tpe" is created.
49639 +
49640 +config GRKERNSEC_TPE_ALL
49641 + bool "Partially restrict all non-root users"
49642 + depends on GRKERNSEC_TPE
49643 + help
49644 + If you say Y here, all non-root users will be covered under
49645 + a weaker TPE restriction. This is separate from, and in addition to,
49646 + the main TPE options that you have selected elsewhere. Thus, if a
49647 + "trusted" GID is chosen, this restriction applies to even that GID.
49648 + Under this restriction, all non-root users will only be allowed to
49649 + execute files in directories they own that are not group or
49650 + world-writable, or in directories owned by root and writable only by
49651 + root. If the sysctl option is enabled, a sysctl option with name
49652 + "tpe_restrict_all" is created.
49653 +
49654 +config GRKERNSEC_TPE_INVERT
49655 + bool "Invert GID option"
49656 + depends on GRKERNSEC_TPE
49657 + help
49658 + If you say Y here, the group you specify in the TPE configuration will
49659 + decide what group TPE restrictions will be *disabled* for. This
49660 + option is useful if you want TPE restrictions to be applied to most
49661 + users on the system. If the sysctl option is enabled, a sysctl option
49662 + with name "tpe_invert" is created. Unlike other sysctl options, this
49663 + entry will default to on for backward-compatibility.
49664 +
49665 +config GRKERNSEC_TPE_GID
49666 + int "GID for untrusted users"
49667 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
49668 + default 1005
49669 + help
49670 + Setting this GID determines what group TPE restrictions will be
49671 + *enabled* for. If the sysctl option is enabled, a sysctl option
49672 + with name "tpe_gid" is created.
49673 +
49674 +config GRKERNSEC_TPE_GID
49675 + int "GID for trusted users"
49676 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
49677 + default 1005
49678 + help
49679 + Setting this GID determines what group TPE restrictions will be
49680 + *disabled* for. If the sysctl option is enabled, a sysctl option
49681 + with name "tpe_gid" is created.
49682 +
49683 +endmenu
49684 +menu "Network Protections"
49685 +depends on GRKERNSEC
49686 +
49687 +config GRKERNSEC_RANDNET
49688 + bool "Larger entropy pools"
49689 + help
49690 + If you say Y here, the entropy pools used for many features of Linux
49691 + and grsecurity will be doubled in size. Since several grsecurity
49692 + features use additional randomness, it is recommended that you say Y
49693 + here. Saying Y here has a similar effect as modifying
49694 + /proc/sys/kernel/random/poolsize.
49695 +
49696 +config GRKERNSEC_BLACKHOLE
49697 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
49698 + depends on NET
49699 + help
49700 + If you say Y here, neither TCP resets nor ICMP
49701 + destination-unreachable packets will be sent in response to packets
49702 + sent to ports for which no associated listening process exists.
49703 + This feature supports both IPV4 and IPV6 and exempts the
49704 + loopback interface from blackholing. Enabling this feature
49705 + makes a host more resilient to DoS attacks and reduces network
49706 + visibility against scanners.
49707 +
49708 + The blackhole feature as-implemented is equivalent to the FreeBSD
49709 + blackhole feature, as it prevents RST responses to all packets, not
49710 + just SYNs. Under most application behavior this causes no
49711 + problems, but applications (like haproxy) may not close certain
49712 + connections in a way that cleanly terminates them on the remote
49713 + end, leaving the remote host in LAST_ACK state. Because of this
49714 + side-effect and to prevent intentional LAST_ACK DoSes, this
49715 + feature also adds automatic mitigation against such attacks.
49716 + The mitigation drastically reduces the amount of time a socket
49717 + can spend in LAST_ACK state. If you're using haproxy and not
49718 + all servers it connects to have this option enabled, consider
49719 + disabling this feature on the haproxy host.
49720 +
49721 + If the sysctl option is enabled, two sysctl options with names
49722 + "ip_blackhole" and "lastack_retries" will be created.
49723 + While "ip_blackhole" takes the standard zero/non-zero on/off
49724 + toggle, "lastack_retries" uses the same kinds of values as
49725 + "tcp_retries1" and "tcp_retries2". The default value of 4
49726 + prevents a socket from lasting more than 45 seconds in LAST_ACK
49727 + state.
49728 +
49729 +config GRKERNSEC_SOCKET
49730 + bool "Socket restrictions"
49731 + depends on NET
49732 + help
49733 + If you say Y here, you will be able to choose from several options.
49734 + If you assign a GID on your system and add it to the supplementary
49735 + groups of users you want to restrict socket access to, this patch
49736 + will perform up to three things, based on the option(s) you choose.
49737 +
49738 +config GRKERNSEC_SOCKET_ALL
49739 + bool "Deny any sockets to group"
49740 + depends on GRKERNSEC_SOCKET
49741 + help
49742 + If you say Y here, you will be able to choose a GID whose users will
49743 + be unable to connect to other hosts from your machine or run server
49744 + applications from your machine. If the sysctl option is enabled, a
49745 + sysctl option with name "socket_all" is created.
49746 +
49747 +config GRKERNSEC_SOCKET_ALL_GID
49748 + int "GID to deny all sockets for"
49749 + depends on GRKERNSEC_SOCKET_ALL
49750 + default 1004
49751 + help
49752 + Here you can choose the GID to disable socket access for. Remember to
49753 + add the users you want socket access disabled for to the GID
49754 + specified here. If the sysctl option is enabled, a sysctl option
49755 + with name "socket_all_gid" is created.
49756 +
49757 +config GRKERNSEC_SOCKET_CLIENT
49758 + bool "Deny client sockets to group"
49759 + depends on GRKERNSEC_SOCKET
49760 + help
49761 + If you say Y here, you will be able to choose a GID whose users will
49762 + be unable to connect to other hosts from your machine, but will be
49763 + able to run servers. If this option is enabled, all users in the group
49764 + you specify will have to use passive mode when initiating ftp transfers
49765 + from the shell on your machine. If the sysctl option is enabled, a
49766 + sysctl option with name "socket_client" is created.
49767 +
49768 +config GRKERNSEC_SOCKET_CLIENT_GID
49769 + int "GID to deny client sockets for"
49770 + depends on GRKERNSEC_SOCKET_CLIENT
49771 + default 1003
49772 + help
49773 + Here you can choose the GID to disable client socket access for.
49774 + Remember to add the users you want client socket access disabled for to
49775 + the GID specified here. If the sysctl option is enabled, a sysctl
49776 + option with name "socket_client_gid" is created.
49777 +
49778 +config GRKERNSEC_SOCKET_SERVER
49779 + bool "Deny server sockets to group"
49780 + depends on GRKERNSEC_SOCKET
49781 + help
49782 + If you say Y here, you will be able to choose a GID whose users will
49783 + be unable to run server applications from your machine. If the sysctl
49784 + option is enabled, a sysctl option with name "socket_server" is created.
49785 +
49786 +config GRKERNSEC_SOCKET_SERVER_GID
49787 + int "GID to deny server sockets for"
49788 + depends on GRKERNSEC_SOCKET_SERVER
49789 + default 1002
49790 + help
49791 + Here you can choose the GID to disable server socket access for.
49792 + Remember to add the users you want server socket access disabled for to
49793 + the GID specified here. If the sysctl option is enabled, a sysctl
49794 + option with name "socket_server_gid" is created.
49795 +
49796 +endmenu
49797 +menu "Sysctl support"
49798 +depends on GRKERNSEC && SYSCTL
49799 +
49800 +config GRKERNSEC_SYSCTL
49801 + bool "Sysctl support"
49802 + help
49803 + If you say Y here, you will be able to change the options that
49804 + grsecurity runs with at bootup, without having to recompile your
49805 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
49806 + to enable (1) or disable (0) various features. All the sysctl entries
49807 + are mutable until the "grsec_lock" entry is set to a non-zero value.
49808 + All features enabled in the kernel configuration are disabled at boot
49809 + if you do not say Y to the "Turn on features by default" option.
49810 + All options should be set at startup, and the grsec_lock entry should
49811 + be set to a non-zero value after all the options are set.
49812 + *THIS IS EXTREMELY IMPORTANT*
49813 +
49814 +config GRKERNSEC_SYSCTL_DISTRO
49815 + bool "Extra sysctl support for distro makers (READ HELP)"
49816 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
49817 + help
49818 + If you say Y here, additional sysctl options will be created
49819 + for features that affect processes running as root. Therefore,
49820 + it is critical when using this option that the grsec_lock entry be
49821 + enabled after boot. Only distros with prebuilt kernel packages
49822 + with this option enabled that can ensure grsec_lock is enabled
49823 + after boot should use this option.
49824 + *Failure to set grsec_lock after boot makes all grsec features
49825 + this option covers useless*
49826 +
49827 + Currently this option creates the following sysctl entries:
49828 + "Disable Privileged I/O": "disable_priv_io"
49829 +
49830 +config GRKERNSEC_SYSCTL_ON
49831 + bool "Turn on features by default"
49832 + depends on GRKERNSEC_SYSCTL
49833 + help
49834 + If you say Y here, instead of having all features enabled in the
49835 + kernel configuration disabled at boot time, the features will be
49836 + enabled at boot time. It is recommended you say Y here unless
49837 + there is some reason you would want all sysctl-tunable features to
49838 + be disabled by default. As mentioned elsewhere, it is important
49839 + to enable the grsec_lock entry once you have finished modifying
49840 + the sysctl entries.
49841 +
49842 +endmenu
49843 +menu "Logging Options"
49844 +depends on GRKERNSEC
49845 +
49846 +config GRKERNSEC_FLOODTIME
49847 + int "Seconds in between log messages (minimum)"
49848 + default 10
49849 + help
49850 + This option allows you to enforce the number of seconds between
49851 + grsecurity log messages. The default should be suitable for most
49852 + people, however, if you choose to change it, choose a value small enough
49853 + to allow informative logs to be produced, but large enough to
49854 + prevent flooding.
49855 +
49856 +config GRKERNSEC_FLOODBURST
49857 + int "Number of messages in a burst (maximum)"
49858 + default 6
49859 + help
49860 + This option allows you to choose the maximum number of messages allowed
49861 + within the flood time interval you chose in a separate option. The
49862 + default should be suitable for most people, however if you find that
49863 + many of your logs are being interpreted as flooding, you may want to
49864 + raise this value.
49865 +
49866 +endmenu
49867 +
49868 +endmenu
49869 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
49870 new file mode 100644
49871 index 0000000..1b9afa9
49872 --- /dev/null
49873 +++ b/grsecurity/Makefile
49874 @@ -0,0 +1,38 @@
49875 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
49876 +# during 2001-2009 it has been completely redesigned by Brad Spengler
49877 +# into an RBAC system
49878 +#
49879 +# All code in this directory and various hooks inserted throughout the kernel
49880 +# are copyright Brad Spengler - Open Source Security, Inc., and released
49881 +# under the GPL v2 or higher
49882 +
49883 +KBUILD_CFLAGS += -Werror
49884 +
49885 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
49886 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
49887 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
49888 +
49889 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
49890 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
49891 + gracl_learn.o grsec_log.o
49892 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
49893 +
49894 +ifdef CONFIG_NET
49895 +obj-y += grsec_sock.o
49896 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
49897 +endif
49898 +
49899 +ifndef CONFIG_GRKERNSEC
49900 +obj-y += grsec_disabled.o
49901 +endif
49902 +
49903 +ifdef CONFIG_GRKERNSEC_HIDESYM
49904 +extra-y := grsec_hidesym.o
49905 +$(obj)/grsec_hidesym.o:
49906 + @-chmod -f 500 /boot
49907 + @-chmod -f 500 /lib/modules
49908 + @-chmod -f 500 /lib64/modules
49909 + @-chmod -f 500 /lib32/modules
49910 + @-chmod -f 700 .
49911 + @echo ' grsec: protected kernel image paths'
49912 +endif
49913 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
49914 new file mode 100644
49915 index 0000000..2733872
49916 --- /dev/null
49917 +++ b/grsecurity/gracl.c
49918 @@ -0,0 +1,4163 @@
49919 +#include <linux/kernel.h>
49920 +#include <linux/module.h>
49921 +#include <linux/sched.h>
49922 +#include <linux/mm.h>
49923 +#include <linux/file.h>
49924 +#include <linux/fs.h>
49925 +#include <linux/namei.h>
49926 +#include <linux/mount.h>
49927 +#include <linux/tty.h>
49928 +#include <linux/proc_fs.h>
49929 +#include <linux/lglock.h>
49930 +#include <linux/slab.h>
49931 +#include <linux/vmalloc.h>
49932 +#include <linux/types.h>
49933 +#include <linux/sysctl.h>
49934 +#include <linux/netdevice.h>
49935 +#include <linux/ptrace.h>
49936 +#include <linux/gracl.h>
49937 +#include <linux/gralloc.h>
49938 +#include <linux/security.h>
49939 +#include <linux/grinternal.h>
49940 +#include <linux/pid_namespace.h>
49941 +#include <linux/fdtable.h>
49942 +#include <linux/percpu.h>
49943 +
49944 +#include <asm/uaccess.h>
49945 +#include <asm/errno.h>
49946 +#include <asm/mman.h>
49947 +
49948 +static struct acl_role_db acl_role_set;
49949 +static struct name_db name_set;
49950 +static struct inodev_db inodev_set;
49951 +
49952 +/* for keeping track of userspace pointers used for subjects, so we
49953 + can share references in the kernel as well
49954 +*/
49955 +
49956 +static struct path real_root;
49957 +
49958 +static struct acl_subj_map_db subj_map_set;
49959 +
49960 +static struct acl_role_label *default_role;
49961 +
49962 +static struct acl_role_label *role_list;
49963 +
49964 +static u16 acl_sp_role_value;
49965 +
49966 +extern char *gr_shared_page[4];
49967 +static DEFINE_MUTEX(gr_dev_mutex);
49968 +DEFINE_RWLOCK(gr_inode_lock);
49969 +
49970 +struct gr_arg *gr_usermode;
49971 +
49972 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
49973 +
49974 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
49975 +extern void gr_clear_learn_entries(void);
49976 +
49977 +#ifdef CONFIG_GRKERNSEC_RESLOG
49978 +extern void gr_log_resource(const struct task_struct *task,
49979 + const int res, const unsigned long wanted, const int gt);
49980 +#endif
49981 +
49982 +unsigned char *gr_system_salt;
49983 +unsigned char *gr_system_sum;
49984 +
49985 +static struct sprole_pw **acl_special_roles = NULL;
49986 +static __u16 num_sprole_pws = 0;
49987 +
49988 +static struct acl_role_label *kernel_role = NULL;
49989 +
49990 +static unsigned int gr_auth_attempts = 0;
49991 +static unsigned long gr_auth_expires = 0UL;
49992 +
49993 +#ifdef CONFIG_NET
49994 +extern struct vfsmount *sock_mnt;
49995 +#endif
49996 +
49997 +extern struct vfsmount *pipe_mnt;
49998 +extern struct vfsmount *shm_mnt;
49999 +#ifdef CONFIG_HUGETLBFS
50000 +extern struct vfsmount *hugetlbfs_vfsmount;
50001 +#endif
50002 +
50003 +static struct acl_object_label *fakefs_obj_rw;
50004 +static struct acl_object_label *fakefs_obj_rwx;
50005 +
50006 +extern int gr_init_uidset(void);
50007 +extern void gr_free_uidset(void);
50008 +extern void gr_remove_uid(uid_t uid);
50009 +extern int gr_find_uid(uid_t uid);
50010 +
50011 +DECLARE_BRLOCK(vfsmount_lock);
50012 +
50013 +__inline__ int
50014 +gr_acl_is_enabled(void)
50015 +{
50016 + return (gr_status & GR_READY);
50017 +}
50018 +
50019 +#ifdef CONFIG_BTRFS_FS
50020 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50021 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50022 +#endif
50023 +
50024 +static inline dev_t __get_dev(const struct dentry *dentry)
50025 +{
50026 +#ifdef CONFIG_BTRFS_FS
50027 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50028 + return get_btrfs_dev_from_inode(dentry->d_inode);
50029 + else
50030 +#endif
50031 + return dentry->d_inode->i_sb->s_dev;
50032 +}
50033 +
50034 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50035 +{
50036 + return __get_dev(dentry);
50037 +}
50038 +
50039 +static char gr_task_roletype_to_char(struct task_struct *task)
50040 +{
50041 + switch (task->role->roletype &
50042 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50043 + GR_ROLE_SPECIAL)) {
50044 + case GR_ROLE_DEFAULT:
50045 + return 'D';
50046 + case GR_ROLE_USER:
50047 + return 'U';
50048 + case GR_ROLE_GROUP:
50049 + return 'G';
50050 + case GR_ROLE_SPECIAL:
50051 + return 'S';
50052 + }
50053 +
50054 + return 'X';
50055 +}
50056 +
50057 +char gr_roletype_to_char(void)
50058 +{
50059 + return gr_task_roletype_to_char(current);
50060 +}
50061 +
50062 +__inline__ int
50063 +gr_acl_tpe_check(void)
50064 +{
50065 + if (unlikely(!(gr_status & GR_READY)))
50066 + return 0;
50067 + if (current->role->roletype & GR_ROLE_TPE)
50068 + return 1;
50069 + else
50070 + return 0;
50071 +}
50072 +
50073 +int
50074 +gr_handle_rawio(const struct inode *inode)
50075 +{
50076 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50077 + if (inode && S_ISBLK(inode->i_mode) &&
50078 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50079 + !capable(CAP_SYS_RAWIO))
50080 + return 1;
50081 +#endif
50082 + return 0;
50083 +}
50084 +
50085 +static int
50086 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50087 +{
50088 + if (likely(lena != lenb))
50089 + return 0;
50090 +
50091 + return !memcmp(a, b, lena);
50092 +}
50093 +
50094 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50095 +{
50096 + *buflen -= namelen;
50097 + if (*buflen < 0)
50098 + return -ENAMETOOLONG;
50099 + *buffer -= namelen;
50100 + memcpy(*buffer, str, namelen);
50101 + return 0;
50102 +}
50103 +
50104 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50105 +{
50106 + return prepend(buffer, buflen, name->name, name->len);
50107 +}
50108 +
50109 +static int prepend_path(const struct path *path, struct path *root,
50110 + char **buffer, int *buflen)
50111 +{
50112 + struct dentry *dentry = path->dentry;
50113 + struct vfsmount *vfsmnt = path->mnt;
50114 + bool slash = false;
50115 + int error = 0;
50116 +
50117 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50118 + struct dentry * parent;
50119 +
50120 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50121 + /* Global root? */
50122 + if (vfsmnt->mnt_parent == vfsmnt) {
50123 + goto out;
50124 + }
50125 + dentry = vfsmnt->mnt_mountpoint;
50126 + vfsmnt = vfsmnt->mnt_parent;
50127 + continue;
50128 + }
50129 + parent = dentry->d_parent;
50130 + prefetch(parent);
50131 + spin_lock(&dentry->d_lock);
50132 + error = prepend_name(buffer, buflen, &dentry->d_name);
50133 + spin_unlock(&dentry->d_lock);
50134 + if (!error)
50135 + error = prepend(buffer, buflen, "/", 1);
50136 + if (error)
50137 + break;
50138 +
50139 + slash = true;
50140 + dentry = parent;
50141 + }
50142 +
50143 +out:
50144 + if (!error && !slash)
50145 + error = prepend(buffer, buflen, "/", 1);
50146 +
50147 + return error;
50148 +}
50149 +
50150 +/* this must be called with vfsmount_lock and rename_lock held */
50151 +
50152 +static char *__our_d_path(const struct path *path, struct path *root,
50153 + char *buf, int buflen)
50154 +{
50155 + char *res = buf + buflen;
50156 + int error;
50157 +
50158 + prepend(&res, &buflen, "\0", 1);
50159 + error = prepend_path(path, root, &res, &buflen);
50160 + if (error)
50161 + return ERR_PTR(error);
50162 +
50163 + return res;
50164 +}
50165 +
50166 +static char *
50167 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50168 +{
50169 + char *retval;
50170 +
50171 + retval = __our_d_path(path, root, buf, buflen);
50172 + if (unlikely(IS_ERR(retval)))
50173 + retval = strcpy(buf, "<path too long>");
50174 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50175 + retval[1] = '\0';
50176 +
50177 + return retval;
50178 +}
50179 +
50180 +static char *
50181 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50182 + char *buf, int buflen)
50183 +{
50184 + struct path path;
50185 + char *res;
50186 +
50187 + path.dentry = (struct dentry *)dentry;
50188 + path.mnt = (struct vfsmount *)vfsmnt;
50189 +
50190 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50191 + by the RBAC system */
50192 + res = gen_full_path(&path, &real_root, buf, buflen);
50193 +
50194 + return res;
50195 +}
50196 +
50197 +static char *
50198 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50199 + char *buf, int buflen)
50200 +{
50201 + char *res;
50202 + struct path path;
50203 + struct path root;
50204 + struct task_struct *reaper = &init_task;
50205 +
50206 + path.dentry = (struct dentry *)dentry;
50207 + path.mnt = (struct vfsmount *)vfsmnt;
50208 +
50209 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50210 + get_fs_root(reaper->fs, &root);
50211 +
50212 + write_seqlock(&rename_lock);
50213 + br_read_lock(vfsmount_lock);
50214 + res = gen_full_path(&path, &root, buf, buflen);
50215 + br_read_unlock(vfsmount_lock);
50216 + write_sequnlock(&rename_lock);
50217 +
50218 + path_put(&root);
50219 + return res;
50220 +}
50221 +
50222 +static char *
50223 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50224 +{
50225 + char *ret;
50226 + write_seqlock(&rename_lock);
50227 + br_read_lock(vfsmount_lock);
50228 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50229 + PAGE_SIZE);
50230 + br_read_unlock(vfsmount_lock);
50231 + write_sequnlock(&rename_lock);
50232 + return ret;
50233 +}
50234 +
50235 +static char *
50236 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50237 +{
50238 + char *ret;
50239 + char *buf;
50240 + int buflen;
50241 +
50242 + write_seqlock(&rename_lock);
50243 + br_read_lock(vfsmount_lock);
50244 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50245 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50246 + buflen = (int)(ret - buf);
50247 + if (buflen >= 5)
50248 + prepend(&ret, &buflen, "/proc", 5);
50249 + else
50250 + ret = strcpy(buf, "<path too long>");
50251 + br_read_unlock(vfsmount_lock);
50252 + write_sequnlock(&rename_lock);
50253 + return ret;
50254 +}
50255 +
50256 +char *
50257 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50258 +{
50259 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50260 + PAGE_SIZE);
50261 +}
50262 +
50263 +char *
50264 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50265 +{
50266 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50267 + PAGE_SIZE);
50268 +}
50269 +
50270 +char *
50271 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50272 +{
50273 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50274 + PAGE_SIZE);
50275 +}
50276 +
50277 +char *
50278 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50279 +{
50280 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50281 + PAGE_SIZE);
50282 +}
50283 +
50284 +char *
50285 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50286 +{
50287 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50288 + PAGE_SIZE);
50289 +}
50290 +
50291 +__inline__ __u32
50292 +to_gr_audit(const __u32 reqmode)
50293 +{
50294 + /* masks off auditable permission flags, then shifts them to create
50295 + auditing flags, and adds the special case of append auditing if
50296 + we're requesting write */
50297 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50298 +}
50299 +
50300 +struct acl_subject_label *
50301 +lookup_subject_map(const struct acl_subject_label *userp)
50302 +{
50303 + unsigned int index = shash(userp, subj_map_set.s_size);
50304 + struct subject_map *match;
50305 +
50306 + match = subj_map_set.s_hash[index];
50307 +
50308 + while (match && match->user != userp)
50309 + match = match->next;
50310 +
50311 + if (match != NULL)
50312 + return match->kernel;
50313 + else
50314 + return NULL;
50315 +}
50316 +
50317 +static void
50318 +insert_subj_map_entry(struct subject_map *subjmap)
50319 +{
50320 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50321 + struct subject_map **curr;
50322 +
50323 + subjmap->prev = NULL;
50324 +
50325 + curr = &subj_map_set.s_hash[index];
50326 + if (*curr != NULL)
50327 + (*curr)->prev = subjmap;
50328 +
50329 + subjmap->next = *curr;
50330 + *curr = subjmap;
50331 +
50332 + return;
50333 +}
50334 +
50335 +static struct acl_role_label *
50336 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50337 + const gid_t gid)
50338 +{
50339 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50340 + struct acl_role_label *match;
50341 + struct role_allowed_ip *ipp;
50342 + unsigned int x;
50343 + u32 curr_ip = task->signal->curr_ip;
50344 +
50345 + task->signal->saved_ip = curr_ip;
50346 +
50347 + match = acl_role_set.r_hash[index];
50348 +
50349 + while (match) {
50350 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50351 + for (x = 0; x < match->domain_child_num; x++) {
50352 + if (match->domain_children[x] == uid)
50353 + goto found;
50354 + }
50355 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50356 + break;
50357 + match = match->next;
50358 + }
50359 +found:
50360 + if (match == NULL) {
50361 + try_group:
50362 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50363 + match = acl_role_set.r_hash[index];
50364 +
50365 + while (match) {
50366 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50367 + for (x = 0; x < match->domain_child_num; x++) {
50368 + if (match->domain_children[x] == gid)
50369 + goto found2;
50370 + }
50371 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50372 + break;
50373 + match = match->next;
50374 + }
50375 +found2:
50376 + if (match == NULL)
50377 + match = default_role;
50378 + if (match->allowed_ips == NULL)
50379 + return match;
50380 + else {
50381 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50382 + if (likely
50383 + ((ntohl(curr_ip) & ipp->netmask) ==
50384 + (ntohl(ipp->addr) & ipp->netmask)))
50385 + return match;
50386 + }
50387 + match = default_role;
50388 + }
50389 + } else if (match->allowed_ips == NULL) {
50390 + return match;
50391 + } else {
50392 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50393 + if (likely
50394 + ((ntohl(curr_ip) & ipp->netmask) ==
50395 + (ntohl(ipp->addr) & ipp->netmask)))
50396 + return match;
50397 + }
50398 + goto try_group;
50399 + }
50400 +
50401 + return match;
50402 +}
50403 +
50404 +struct acl_subject_label *
50405 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50406 + const struct acl_role_label *role)
50407 +{
50408 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50409 + struct acl_subject_label *match;
50410 +
50411 + match = role->subj_hash[index];
50412 +
50413 + while (match && (match->inode != ino || match->device != dev ||
50414 + (match->mode & GR_DELETED))) {
50415 + match = match->next;
50416 + }
50417 +
50418 + if (match && !(match->mode & GR_DELETED))
50419 + return match;
50420 + else
50421 + return NULL;
50422 +}
50423 +
50424 +struct acl_subject_label *
50425 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50426 + const struct acl_role_label *role)
50427 +{
50428 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50429 + struct acl_subject_label *match;
50430 +
50431 + match = role->subj_hash[index];
50432 +
50433 + while (match && (match->inode != ino || match->device != dev ||
50434 + !(match->mode & GR_DELETED))) {
50435 + match = match->next;
50436 + }
50437 +
50438 + if (match && (match->mode & GR_DELETED))
50439 + return match;
50440 + else
50441 + return NULL;
50442 +}
50443 +
50444 +static struct acl_object_label *
50445 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50446 + const struct acl_subject_label *subj)
50447 +{
50448 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50449 + struct acl_object_label *match;
50450 +
50451 + match = subj->obj_hash[index];
50452 +
50453 + while (match && (match->inode != ino || match->device != dev ||
50454 + (match->mode & GR_DELETED))) {
50455 + match = match->next;
50456 + }
50457 +
50458 + if (match && !(match->mode & GR_DELETED))
50459 + return match;
50460 + else
50461 + return NULL;
50462 +}
50463 +
50464 +static struct acl_object_label *
50465 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50466 + const struct acl_subject_label *subj)
50467 +{
50468 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50469 + struct acl_object_label *match;
50470 +
50471 + match = subj->obj_hash[index];
50472 +
50473 + while (match && (match->inode != ino || match->device != dev ||
50474 + !(match->mode & GR_DELETED))) {
50475 + match = match->next;
50476 + }
50477 +
50478 + if (match && (match->mode & GR_DELETED))
50479 + return match;
50480 +
50481 + match = subj->obj_hash[index];
50482 +
50483 + while (match && (match->inode != ino || match->device != dev ||
50484 + (match->mode & GR_DELETED))) {
50485 + match = match->next;
50486 + }
50487 +
50488 + if (match && !(match->mode & GR_DELETED))
50489 + return match;
50490 + else
50491 + return NULL;
50492 +}
50493 +
50494 +static struct name_entry *
50495 +lookup_name_entry(const char *name)
50496 +{
50497 + unsigned int len = strlen(name);
50498 + unsigned int key = full_name_hash(name, len);
50499 + unsigned int index = key % name_set.n_size;
50500 + struct name_entry *match;
50501 +
50502 + match = name_set.n_hash[index];
50503 +
50504 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
50505 + match = match->next;
50506 +
50507 + return match;
50508 +}
50509 +
50510 +static struct name_entry *
50511 +lookup_name_entry_create(const char *name)
50512 +{
50513 + unsigned int len = strlen(name);
50514 + unsigned int key = full_name_hash(name, len);
50515 + unsigned int index = key % name_set.n_size;
50516 + struct name_entry *match;
50517 +
50518 + match = name_set.n_hash[index];
50519 +
50520 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50521 + !match->deleted))
50522 + match = match->next;
50523 +
50524 + if (match && match->deleted)
50525 + return match;
50526 +
50527 + match = name_set.n_hash[index];
50528 +
50529 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
50530 + match->deleted))
50531 + match = match->next;
50532 +
50533 + if (match && !match->deleted)
50534 + return match;
50535 + else
50536 + return NULL;
50537 +}
50538 +
50539 +static struct inodev_entry *
50540 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
50541 +{
50542 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
50543 + struct inodev_entry *match;
50544 +
50545 + match = inodev_set.i_hash[index];
50546 +
50547 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
50548 + match = match->next;
50549 +
50550 + return match;
50551 +}
50552 +
50553 +static void
50554 +insert_inodev_entry(struct inodev_entry *entry)
50555 +{
50556 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
50557 + inodev_set.i_size);
50558 + struct inodev_entry **curr;
50559 +
50560 + entry->prev = NULL;
50561 +
50562 + curr = &inodev_set.i_hash[index];
50563 + if (*curr != NULL)
50564 + (*curr)->prev = entry;
50565 +
50566 + entry->next = *curr;
50567 + *curr = entry;
50568 +
50569 + return;
50570 +}
50571 +
50572 +static void
50573 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
50574 +{
50575 + unsigned int index =
50576 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
50577 + struct acl_role_label **curr;
50578 + struct acl_role_label *tmp;
50579 +
50580 + curr = &acl_role_set.r_hash[index];
50581 +
50582 + /* if role was already inserted due to domains and already has
50583 + a role in the same bucket as it attached, then we need to
50584 + combine these two buckets
50585 + */
50586 + if (role->next) {
50587 + tmp = role->next;
50588 + while (tmp->next)
50589 + tmp = tmp->next;
50590 + tmp->next = *curr;
50591 + } else
50592 + role->next = *curr;
50593 + *curr = role;
50594 +
50595 + return;
50596 +}
50597 +
50598 +static void
50599 +insert_acl_role_label(struct acl_role_label *role)
50600 +{
50601 + int i;
50602 +
50603 + if (role_list == NULL) {
50604 + role_list = role;
50605 + role->prev = NULL;
50606 + } else {
50607 + role->prev = role_list;
50608 + role_list = role;
50609 + }
50610 +
50611 + /* used for hash chains */
50612 + role->next = NULL;
50613 +
50614 + if (role->roletype & GR_ROLE_DOMAIN) {
50615 + for (i = 0; i < role->domain_child_num; i++)
50616 + __insert_acl_role_label(role, role->domain_children[i]);
50617 + } else
50618 + __insert_acl_role_label(role, role->uidgid);
50619 +}
50620 +
50621 +static int
50622 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
50623 +{
50624 + struct name_entry **curr, *nentry;
50625 + struct inodev_entry *ientry;
50626 + unsigned int len = strlen(name);
50627 + unsigned int key = full_name_hash(name, len);
50628 + unsigned int index = key % name_set.n_size;
50629 +
50630 + curr = &name_set.n_hash[index];
50631 +
50632 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
50633 + curr = &((*curr)->next);
50634 +
50635 + if (*curr != NULL)
50636 + return 1;
50637 +
50638 + nentry = acl_alloc(sizeof (struct name_entry));
50639 + if (nentry == NULL)
50640 + return 0;
50641 + ientry = acl_alloc(sizeof (struct inodev_entry));
50642 + if (ientry == NULL)
50643 + return 0;
50644 + ientry->nentry = nentry;
50645 +
50646 + nentry->key = key;
50647 + nentry->name = name;
50648 + nentry->inode = inode;
50649 + nentry->device = device;
50650 + nentry->len = len;
50651 + nentry->deleted = deleted;
50652 +
50653 + nentry->prev = NULL;
50654 + curr = &name_set.n_hash[index];
50655 + if (*curr != NULL)
50656 + (*curr)->prev = nentry;
50657 + nentry->next = *curr;
50658 + *curr = nentry;
50659 +
50660 + /* insert us into the table searchable by inode/dev */
50661 + insert_inodev_entry(ientry);
50662 +
50663 + return 1;
50664 +}
50665 +
50666 +static void
50667 +insert_acl_obj_label(struct acl_object_label *obj,
50668 + struct acl_subject_label *subj)
50669 +{
50670 + unsigned int index =
50671 + fhash(obj->inode, obj->device, subj->obj_hash_size);
50672 + struct acl_object_label **curr;
50673 +
50674 +
50675 + obj->prev = NULL;
50676 +
50677 + curr = &subj->obj_hash[index];
50678 + if (*curr != NULL)
50679 + (*curr)->prev = obj;
50680 +
50681 + obj->next = *curr;
50682 + *curr = obj;
50683 +
50684 + return;
50685 +}
50686 +
50687 +static void
50688 +insert_acl_subj_label(struct acl_subject_label *obj,
50689 + struct acl_role_label *role)
50690 +{
50691 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
50692 + struct acl_subject_label **curr;
50693 +
50694 + obj->prev = NULL;
50695 +
50696 + curr = &role->subj_hash[index];
50697 + if (*curr != NULL)
50698 + (*curr)->prev = obj;
50699 +
50700 + obj->next = *curr;
50701 + *curr = obj;
50702 +
50703 + return;
50704 +}
50705 +
50706 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
50707 +
50708 +static void *
50709 +create_table(__u32 * len, int elementsize)
50710 +{
50711 + unsigned int table_sizes[] = {
50712 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
50713 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
50714 + 4194301, 8388593, 16777213, 33554393, 67108859
50715 + };
50716 + void *newtable = NULL;
50717 + unsigned int pwr = 0;
50718 +
50719 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
50720 + table_sizes[pwr] <= *len)
50721 + pwr++;
50722 +
50723 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
50724 + return newtable;
50725 +
50726 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
50727 + newtable =
50728 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
50729 + else
50730 + newtable = vmalloc(table_sizes[pwr] * elementsize);
50731 +
50732 + *len = table_sizes[pwr];
50733 +
50734 + return newtable;
50735 +}
50736 +
50737 +static int
50738 +init_variables(const struct gr_arg *arg)
50739 +{
50740 + struct task_struct *reaper = &init_task;
50741 + unsigned int stacksize;
50742 +
50743 + subj_map_set.s_size = arg->role_db.num_subjects;
50744 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
50745 + name_set.n_size = arg->role_db.num_objects;
50746 + inodev_set.i_size = arg->role_db.num_objects;
50747 +
50748 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
50749 + !name_set.n_size || !inodev_set.i_size)
50750 + return 1;
50751 +
50752 + if (!gr_init_uidset())
50753 + return 1;
50754 +
50755 + /* set up the stack that holds allocation info */
50756 +
50757 + stacksize = arg->role_db.num_pointers + 5;
50758 +
50759 + if (!acl_alloc_stack_init(stacksize))
50760 + return 1;
50761 +
50762 + /* grab reference for the real root dentry and vfsmount */
50763 + get_fs_root(reaper->fs, &real_root);
50764 +
50765 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50766 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
50767 +#endif
50768 +
50769 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
50770 + if (fakefs_obj_rw == NULL)
50771 + return 1;
50772 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
50773 +
50774 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
50775 + if (fakefs_obj_rwx == NULL)
50776 + return 1;
50777 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
50778 +
50779 + subj_map_set.s_hash =
50780 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
50781 + acl_role_set.r_hash =
50782 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
50783 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
50784 + inodev_set.i_hash =
50785 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
50786 +
50787 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
50788 + !name_set.n_hash || !inodev_set.i_hash)
50789 + return 1;
50790 +
50791 + memset(subj_map_set.s_hash, 0,
50792 + sizeof(struct subject_map *) * subj_map_set.s_size);
50793 + memset(acl_role_set.r_hash, 0,
50794 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
50795 + memset(name_set.n_hash, 0,
50796 + sizeof (struct name_entry *) * name_set.n_size);
50797 + memset(inodev_set.i_hash, 0,
50798 + sizeof (struct inodev_entry *) * inodev_set.i_size);
50799 +
50800 + return 0;
50801 +}
50802 +
50803 +/* free information not needed after startup
50804 + currently contains user->kernel pointer mappings for subjects
50805 +*/
50806 +
50807 +static void
50808 +free_init_variables(void)
50809 +{
50810 + __u32 i;
50811 +
50812 + if (subj_map_set.s_hash) {
50813 + for (i = 0; i < subj_map_set.s_size; i++) {
50814 + if (subj_map_set.s_hash[i]) {
50815 + kfree(subj_map_set.s_hash[i]);
50816 + subj_map_set.s_hash[i] = NULL;
50817 + }
50818 + }
50819 +
50820 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
50821 + PAGE_SIZE)
50822 + kfree(subj_map_set.s_hash);
50823 + else
50824 + vfree(subj_map_set.s_hash);
50825 + }
50826 +
50827 + return;
50828 +}
50829 +
50830 +static void
50831 +free_variables(void)
50832 +{
50833 + struct acl_subject_label *s;
50834 + struct acl_role_label *r;
50835 + struct task_struct *task, *task2;
50836 + unsigned int x;
50837 +
50838 + gr_clear_learn_entries();
50839 +
50840 + read_lock(&tasklist_lock);
50841 + do_each_thread(task2, task) {
50842 + task->acl_sp_role = 0;
50843 + task->acl_role_id = 0;
50844 + task->acl = NULL;
50845 + task->role = NULL;
50846 + } while_each_thread(task2, task);
50847 + read_unlock(&tasklist_lock);
50848 +
50849 + /* release the reference to the real root dentry and vfsmount */
50850 + path_put(&real_root);
50851 +
50852 + /* free all object hash tables */
50853 +
50854 + FOR_EACH_ROLE_START(r)
50855 + if (r->subj_hash == NULL)
50856 + goto next_role;
50857 + FOR_EACH_SUBJECT_START(r, s, x)
50858 + if (s->obj_hash == NULL)
50859 + break;
50860 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50861 + kfree(s->obj_hash);
50862 + else
50863 + vfree(s->obj_hash);
50864 + FOR_EACH_SUBJECT_END(s, x)
50865 + FOR_EACH_NESTED_SUBJECT_START(r, s)
50866 + if (s->obj_hash == NULL)
50867 + break;
50868 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
50869 + kfree(s->obj_hash);
50870 + else
50871 + vfree(s->obj_hash);
50872 + FOR_EACH_NESTED_SUBJECT_END(s)
50873 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
50874 + kfree(r->subj_hash);
50875 + else
50876 + vfree(r->subj_hash);
50877 + r->subj_hash = NULL;
50878 +next_role:
50879 + FOR_EACH_ROLE_END(r)
50880 +
50881 + acl_free_all();
50882 +
50883 + if (acl_role_set.r_hash) {
50884 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
50885 + PAGE_SIZE)
50886 + kfree(acl_role_set.r_hash);
50887 + else
50888 + vfree(acl_role_set.r_hash);
50889 + }
50890 + if (name_set.n_hash) {
50891 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
50892 + PAGE_SIZE)
50893 + kfree(name_set.n_hash);
50894 + else
50895 + vfree(name_set.n_hash);
50896 + }
50897 +
50898 + if (inodev_set.i_hash) {
50899 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
50900 + PAGE_SIZE)
50901 + kfree(inodev_set.i_hash);
50902 + else
50903 + vfree(inodev_set.i_hash);
50904 + }
50905 +
50906 + gr_free_uidset();
50907 +
50908 + memset(&name_set, 0, sizeof (struct name_db));
50909 + memset(&inodev_set, 0, sizeof (struct inodev_db));
50910 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
50911 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
50912 +
50913 + default_role = NULL;
50914 + role_list = NULL;
50915 +
50916 + return;
50917 +}
50918 +
50919 +static __u32
50920 +count_user_objs(struct acl_object_label *userp)
50921 +{
50922 + struct acl_object_label o_tmp;
50923 + __u32 num = 0;
50924 +
50925 + while (userp) {
50926 + if (copy_from_user(&o_tmp, userp,
50927 + sizeof (struct acl_object_label)))
50928 + break;
50929 +
50930 + userp = o_tmp.prev;
50931 + num++;
50932 + }
50933 +
50934 + return num;
50935 +}
50936 +
50937 +static struct acl_subject_label *
50938 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
50939 +
50940 +static int
50941 +copy_user_glob(struct acl_object_label *obj)
50942 +{
50943 + struct acl_object_label *g_tmp, **guser;
50944 + unsigned int len;
50945 + char *tmp;
50946 +
50947 + if (obj->globbed == NULL)
50948 + return 0;
50949 +
50950 + guser = &obj->globbed;
50951 + while (*guser) {
50952 + g_tmp = (struct acl_object_label *)
50953 + acl_alloc(sizeof (struct acl_object_label));
50954 + if (g_tmp == NULL)
50955 + return -ENOMEM;
50956 +
50957 + if (copy_from_user(g_tmp, *guser,
50958 + sizeof (struct acl_object_label)))
50959 + return -EFAULT;
50960 +
50961 + len = strnlen_user(g_tmp->filename, PATH_MAX);
50962 +
50963 + if (!len || len >= PATH_MAX)
50964 + return -EINVAL;
50965 +
50966 + if ((tmp = (char *) acl_alloc(len)) == NULL)
50967 + return -ENOMEM;
50968 +
50969 + if (copy_from_user(tmp, g_tmp->filename, len))
50970 + return -EFAULT;
50971 + tmp[len-1] = '\0';
50972 + g_tmp->filename = tmp;
50973 +
50974 + *guser = g_tmp;
50975 + guser = &(g_tmp->next);
50976 + }
50977 +
50978 + return 0;
50979 +}
50980 +
50981 +static int
50982 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
50983 + struct acl_role_label *role)
50984 +{
50985 + struct acl_object_label *o_tmp;
50986 + unsigned int len;
50987 + int ret;
50988 + char *tmp;
50989 +
50990 + while (userp) {
50991 + if ((o_tmp = (struct acl_object_label *)
50992 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
50993 + return -ENOMEM;
50994 +
50995 + if (copy_from_user(o_tmp, userp,
50996 + sizeof (struct acl_object_label)))
50997 + return -EFAULT;
50998 +
50999 + userp = o_tmp->prev;
51000 +
51001 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51002 +
51003 + if (!len || len >= PATH_MAX)
51004 + return -EINVAL;
51005 +
51006 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51007 + return -ENOMEM;
51008 +
51009 + if (copy_from_user(tmp, o_tmp->filename, len))
51010 + return -EFAULT;
51011 + tmp[len-1] = '\0';
51012 + o_tmp->filename = tmp;
51013 +
51014 + insert_acl_obj_label(o_tmp, subj);
51015 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51016 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51017 + return -ENOMEM;
51018 +
51019 + ret = copy_user_glob(o_tmp);
51020 + if (ret)
51021 + return ret;
51022 +
51023 + if (o_tmp->nested) {
51024 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51025 + if (IS_ERR(o_tmp->nested))
51026 + return PTR_ERR(o_tmp->nested);
51027 +
51028 + /* insert into nested subject list */
51029 + o_tmp->nested->next = role->hash->first;
51030 + role->hash->first = o_tmp->nested;
51031 + }
51032 + }
51033 +
51034 + return 0;
51035 +}
51036 +
51037 +static __u32
51038 +count_user_subjs(struct acl_subject_label *userp)
51039 +{
51040 + struct acl_subject_label s_tmp;
51041 + __u32 num = 0;
51042 +
51043 + while (userp) {
51044 + if (copy_from_user(&s_tmp, userp,
51045 + sizeof (struct acl_subject_label)))
51046 + break;
51047 +
51048 + userp = s_tmp.prev;
51049 + /* do not count nested subjects against this count, since
51050 + they are not included in the hash table, but are
51051 + attached to objects. We have already counted
51052 + the subjects in userspace for the allocation
51053 + stack
51054 + */
51055 + if (!(s_tmp.mode & GR_NESTED))
51056 + num++;
51057 + }
51058 +
51059 + return num;
51060 +}
51061 +
51062 +static int
51063 +copy_user_allowedips(struct acl_role_label *rolep)
51064 +{
51065 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51066 +
51067 + ruserip = rolep->allowed_ips;
51068 +
51069 + while (ruserip) {
51070 + rlast = rtmp;
51071 +
51072 + if ((rtmp = (struct role_allowed_ip *)
51073 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51074 + return -ENOMEM;
51075 +
51076 + if (copy_from_user(rtmp, ruserip,
51077 + sizeof (struct role_allowed_ip)))
51078 + return -EFAULT;
51079 +
51080 + ruserip = rtmp->prev;
51081 +
51082 + if (!rlast) {
51083 + rtmp->prev = NULL;
51084 + rolep->allowed_ips = rtmp;
51085 + } else {
51086 + rlast->next = rtmp;
51087 + rtmp->prev = rlast;
51088 + }
51089 +
51090 + if (!ruserip)
51091 + rtmp->next = NULL;
51092 + }
51093 +
51094 + return 0;
51095 +}
51096 +
51097 +static int
51098 +copy_user_transitions(struct acl_role_label *rolep)
51099 +{
51100 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51101 +
51102 + unsigned int len;
51103 + char *tmp;
51104 +
51105 + rusertp = rolep->transitions;
51106 +
51107 + while (rusertp) {
51108 + rlast = rtmp;
51109 +
51110 + if ((rtmp = (struct role_transition *)
51111 + acl_alloc(sizeof (struct role_transition))) == NULL)
51112 + return -ENOMEM;
51113 +
51114 + if (copy_from_user(rtmp, rusertp,
51115 + sizeof (struct role_transition)))
51116 + return -EFAULT;
51117 +
51118 + rusertp = rtmp->prev;
51119 +
51120 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51121 +
51122 + if (!len || len >= GR_SPROLE_LEN)
51123 + return -EINVAL;
51124 +
51125 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51126 + return -ENOMEM;
51127 +
51128 + if (copy_from_user(tmp, rtmp->rolename, len))
51129 + return -EFAULT;
51130 + tmp[len-1] = '\0';
51131 + rtmp->rolename = tmp;
51132 +
51133 + if (!rlast) {
51134 + rtmp->prev = NULL;
51135 + rolep->transitions = rtmp;
51136 + } else {
51137 + rlast->next = rtmp;
51138 + rtmp->prev = rlast;
51139 + }
51140 +
51141 + if (!rusertp)
51142 + rtmp->next = NULL;
51143 + }
51144 +
51145 + return 0;
51146 +}
51147 +
51148 +static struct acl_subject_label *
51149 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51150 +{
51151 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51152 + unsigned int len;
51153 + char *tmp;
51154 + __u32 num_objs;
51155 + struct acl_ip_label **i_tmp, *i_utmp2;
51156 + struct gr_hash_struct ghash;
51157 + struct subject_map *subjmap;
51158 + unsigned int i_num;
51159 + int err;
51160 +
51161 + s_tmp = lookup_subject_map(userp);
51162 +
51163 + /* we've already copied this subject into the kernel, just return
51164 + the reference to it, and don't copy it over again
51165 + */
51166 + if (s_tmp)
51167 + return(s_tmp);
51168 +
51169 + if ((s_tmp = (struct acl_subject_label *)
51170 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51171 + return ERR_PTR(-ENOMEM);
51172 +
51173 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51174 + if (subjmap == NULL)
51175 + return ERR_PTR(-ENOMEM);
51176 +
51177 + subjmap->user = userp;
51178 + subjmap->kernel = s_tmp;
51179 + insert_subj_map_entry(subjmap);
51180 +
51181 + if (copy_from_user(s_tmp, userp,
51182 + sizeof (struct acl_subject_label)))
51183 + return ERR_PTR(-EFAULT);
51184 +
51185 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51186 +
51187 + if (!len || len >= PATH_MAX)
51188 + return ERR_PTR(-EINVAL);
51189 +
51190 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51191 + return ERR_PTR(-ENOMEM);
51192 +
51193 + if (copy_from_user(tmp, s_tmp->filename, len))
51194 + return ERR_PTR(-EFAULT);
51195 + tmp[len-1] = '\0';
51196 + s_tmp->filename = tmp;
51197 +
51198 + if (!strcmp(s_tmp->filename, "/"))
51199 + role->root_label = s_tmp;
51200 +
51201 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51202 + return ERR_PTR(-EFAULT);
51203 +
51204 + /* copy user and group transition tables */
51205 +
51206 + if (s_tmp->user_trans_num) {
51207 + uid_t *uidlist;
51208 +
51209 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51210 + if (uidlist == NULL)
51211 + return ERR_PTR(-ENOMEM);
51212 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51213 + return ERR_PTR(-EFAULT);
51214 +
51215 + s_tmp->user_transitions = uidlist;
51216 + }
51217 +
51218 + if (s_tmp->group_trans_num) {
51219 + gid_t *gidlist;
51220 +
51221 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51222 + if (gidlist == NULL)
51223 + return ERR_PTR(-ENOMEM);
51224 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51225 + return ERR_PTR(-EFAULT);
51226 +
51227 + s_tmp->group_transitions = gidlist;
51228 + }
51229 +
51230 + /* set up object hash table */
51231 + num_objs = count_user_objs(ghash.first);
51232 +
51233 + s_tmp->obj_hash_size = num_objs;
51234 + s_tmp->obj_hash =
51235 + (struct acl_object_label **)
51236 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51237 +
51238 + if (!s_tmp->obj_hash)
51239 + return ERR_PTR(-ENOMEM);
51240 +
51241 + memset(s_tmp->obj_hash, 0,
51242 + s_tmp->obj_hash_size *
51243 + sizeof (struct acl_object_label *));
51244 +
51245 + /* add in objects */
51246 + err = copy_user_objs(ghash.first, s_tmp, role);
51247 +
51248 + if (err)
51249 + return ERR_PTR(err);
51250 +
51251 + /* set pointer for parent subject */
51252 + if (s_tmp->parent_subject) {
51253 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51254 +
51255 + if (IS_ERR(s_tmp2))
51256 + return s_tmp2;
51257 +
51258 + s_tmp->parent_subject = s_tmp2;
51259 + }
51260 +
51261 + /* add in ip acls */
51262 +
51263 + if (!s_tmp->ip_num) {
51264 + s_tmp->ips = NULL;
51265 + goto insert;
51266 + }
51267 +
51268 + i_tmp =
51269 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51270 + sizeof (struct acl_ip_label *));
51271 +
51272 + if (!i_tmp)
51273 + return ERR_PTR(-ENOMEM);
51274 +
51275 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51276 + *(i_tmp + i_num) =
51277 + (struct acl_ip_label *)
51278 + acl_alloc(sizeof (struct acl_ip_label));
51279 + if (!*(i_tmp + i_num))
51280 + return ERR_PTR(-ENOMEM);
51281 +
51282 + if (copy_from_user
51283 + (&i_utmp2, s_tmp->ips + i_num,
51284 + sizeof (struct acl_ip_label *)))
51285 + return ERR_PTR(-EFAULT);
51286 +
51287 + if (copy_from_user
51288 + (*(i_tmp + i_num), i_utmp2,
51289 + sizeof (struct acl_ip_label)))
51290 + return ERR_PTR(-EFAULT);
51291 +
51292 + if ((*(i_tmp + i_num))->iface == NULL)
51293 + continue;
51294 +
51295 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51296 + if (!len || len >= IFNAMSIZ)
51297 + return ERR_PTR(-EINVAL);
51298 + tmp = acl_alloc(len);
51299 + if (tmp == NULL)
51300 + return ERR_PTR(-ENOMEM);
51301 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51302 + return ERR_PTR(-EFAULT);
51303 + (*(i_tmp + i_num))->iface = tmp;
51304 + }
51305 +
51306 + s_tmp->ips = i_tmp;
51307 +
51308 +insert:
51309 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51310 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51311 + return ERR_PTR(-ENOMEM);
51312 +
51313 + return s_tmp;
51314 +}
51315 +
51316 +static int
51317 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51318 +{
51319 + struct acl_subject_label s_pre;
51320 + struct acl_subject_label * ret;
51321 + int err;
51322 +
51323 + while (userp) {
51324 + if (copy_from_user(&s_pre, userp,
51325 + sizeof (struct acl_subject_label)))
51326 + return -EFAULT;
51327 +
51328 + /* do not add nested subjects here, add
51329 + while parsing objects
51330 + */
51331 +
51332 + if (s_pre.mode & GR_NESTED) {
51333 + userp = s_pre.prev;
51334 + continue;
51335 + }
51336 +
51337 + ret = do_copy_user_subj(userp, role);
51338 +
51339 + err = PTR_ERR(ret);
51340 + if (IS_ERR(ret))
51341 + return err;
51342 +
51343 + insert_acl_subj_label(ret, role);
51344 +
51345 + userp = s_pre.prev;
51346 + }
51347 +
51348 + return 0;
51349 +}
51350 +
51351 +static int
51352 +copy_user_acl(struct gr_arg *arg)
51353 +{
51354 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51355 + struct sprole_pw *sptmp;
51356 + struct gr_hash_struct *ghash;
51357 + uid_t *domainlist;
51358 + unsigned int r_num;
51359 + unsigned int len;
51360 + char *tmp;
51361 + int err = 0;
51362 + __u16 i;
51363 + __u32 num_subjs;
51364 +
51365 + /* we need a default and kernel role */
51366 + if (arg->role_db.num_roles < 2)
51367 + return -EINVAL;
51368 +
51369 + /* copy special role authentication info from userspace */
51370 +
51371 + num_sprole_pws = arg->num_sprole_pws;
51372 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51373 +
51374 + if (!acl_special_roles) {
51375 + err = -ENOMEM;
51376 + goto cleanup;
51377 + }
51378 +
51379 + for (i = 0; i < num_sprole_pws; i++) {
51380 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51381 + if (!sptmp) {
51382 + err = -ENOMEM;
51383 + goto cleanup;
51384 + }
51385 + if (copy_from_user(sptmp, arg->sprole_pws + i,
51386 + sizeof (struct sprole_pw))) {
51387 + err = -EFAULT;
51388 + goto cleanup;
51389 + }
51390 +
51391 + len =
51392 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51393 +
51394 + if (!len || len >= GR_SPROLE_LEN) {
51395 + err = -EINVAL;
51396 + goto cleanup;
51397 + }
51398 +
51399 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
51400 + err = -ENOMEM;
51401 + goto cleanup;
51402 + }
51403 +
51404 + if (copy_from_user(tmp, sptmp->rolename, len)) {
51405 + err = -EFAULT;
51406 + goto cleanup;
51407 + }
51408 + tmp[len-1] = '\0';
51409 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51410 + printk(KERN_ALERT "Copying special role %s\n", tmp);
51411 +#endif
51412 + sptmp->rolename = tmp;
51413 + acl_special_roles[i] = sptmp;
51414 + }
51415 +
51416 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51417 +
51418 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51419 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
51420 +
51421 + if (!r_tmp) {
51422 + err = -ENOMEM;
51423 + goto cleanup;
51424 + }
51425 +
51426 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
51427 + sizeof (struct acl_role_label *))) {
51428 + err = -EFAULT;
51429 + goto cleanup;
51430 + }
51431 +
51432 + if (copy_from_user(r_tmp, r_utmp2,
51433 + sizeof (struct acl_role_label))) {
51434 + err = -EFAULT;
51435 + goto cleanup;
51436 + }
51437 +
51438 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51439 +
51440 + if (!len || len >= PATH_MAX) {
51441 + err = -EINVAL;
51442 + goto cleanup;
51443 + }
51444 +
51445 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
51446 + err = -ENOMEM;
51447 + goto cleanup;
51448 + }
51449 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
51450 + err = -EFAULT;
51451 + goto cleanup;
51452 + }
51453 + tmp[len-1] = '\0';
51454 + r_tmp->rolename = tmp;
51455 +
51456 + if (!strcmp(r_tmp->rolename, "default")
51457 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51458 + default_role = r_tmp;
51459 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51460 + kernel_role = r_tmp;
51461 + }
51462 +
51463 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
51464 + err = -ENOMEM;
51465 + goto cleanup;
51466 + }
51467 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
51468 + err = -EFAULT;
51469 + goto cleanup;
51470 + }
51471 +
51472 + r_tmp->hash = ghash;
51473 +
51474 + num_subjs = count_user_subjs(r_tmp->hash->first);
51475 +
51476 + r_tmp->subj_hash_size = num_subjs;
51477 + r_tmp->subj_hash =
51478 + (struct acl_subject_label **)
51479 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51480 +
51481 + if (!r_tmp->subj_hash) {
51482 + err = -ENOMEM;
51483 + goto cleanup;
51484 + }
51485 +
51486 + err = copy_user_allowedips(r_tmp);
51487 + if (err)
51488 + goto cleanup;
51489 +
51490 + /* copy domain info */
51491 + if (r_tmp->domain_children != NULL) {
51492 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
51493 + if (domainlist == NULL) {
51494 + err = -ENOMEM;
51495 + goto cleanup;
51496 + }
51497 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
51498 + err = -EFAULT;
51499 + goto cleanup;
51500 + }
51501 + r_tmp->domain_children = domainlist;
51502 + }
51503 +
51504 + err = copy_user_transitions(r_tmp);
51505 + if (err)
51506 + goto cleanup;
51507 +
51508 + memset(r_tmp->subj_hash, 0,
51509 + r_tmp->subj_hash_size *
51510 + sizeof (struct acl_subject_label *));
51511 +
51512 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
51513 +
51514 + if (err)
51515 + goto cleanup;
51516 +
51517 + /* set nested subject list to null */
51518 + r_tmp->hash->first = NULL;
51519 +
51520 + insert_acl_role_label(r_tmp);
51521 + }
51522 +
51523 + goto return_err;
51524 + cleanup:
51525 + free_variables();
51526 + return_err:
51527 + return err;
51528 +
51529 +}
51530 +
51531 +static int
51532 +gracl_init(struct gr_arg *args)
51533 +{
51534 + int error = 0;
51535 +
51536 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
51537 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
51538 +
51539 + if (init_variables(args)) {
51540 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
51541 + error = -ENOMEM;
51542 + free_variables();
51543 + goto out;
51544 + }
51545 +
51546 + error = copy_user_acl(args);
51547 + free_init_variables();
51548 + if (error) {
51549 + free_variables();
51550 + goto out;
51551 + }
51552 +
51553 + if ((error = gr_set_acls(0))) {
51554 + free_variables();
51555 + goto out;
51556 + }
51557 +
51558 + pax_open_kernel();
51559 + gr_status |= GR_READY;
51560 + pax_close_kernel();
51561 +
51562 + out:
51563 + return error;
51564 +}
51565 +
51566 +/* derived from glibc fnmatch() 0: match, 1: no match*/
51567 +
51568 +static int
51569 +glob_match(const char *p, const char *n)
51570 +{
51571 + char c;
51572 +
51573 + while ((c = *p++) != '\0') {
51574 + switch (c) {
51575 + case '?':
51576 + if (*n == '\0')
51577 + return 1;
51578 + else if (*n == '/')
51579 + return 1;
51580 + break;
51581 + case '\\':
51582 + if (*n != c)
51583 + return 1;
51584 + break;
51585 + case '*':
51586 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
51587 + if (*n == '/')
51588 + return 1;
51589 + else if (c == '?') {
51590 + if (*n == '\0')
51591 + return 1;
51592 + else
51593 + ++n;
51594 + }
51595 + }
51596 + if (c == '\0') {
51597 + return 0;
51598 + } else {
51599 + const char *endp;
51600 +
51601 + if ((endp = strchr(n, '/')) == NULL)
51602 + endp = n + strlen(n);
51603 +
51604 + if (c == '[') {
51605 + for (--p; n < endp; ++n)
51606 + if (!glob_match(p, n))
51607 + return 0;
51608 + } else if (c == '/') {
51609 + while (*n != '\0' && *n != '/')
51610 + ++n;
51611 + if (*n == '/' && !glob_match(p, n + 1))
51612 + return 0;
51613 + } else {
51614 + for (--p; n < endp; ++n)
51615 + if (*n == c && !glob_match(p, n))
51616 + return 0;
51617 + }
51618 +
51619 + return 1;
51620 + }
51621 + case '[':
51622 + {
51623 + int not;
51624 + char cold;
51625 +
51626 + if (*n == '\0' || *n == '/')
51627 + return 1;
51628 +
51629 + not = (*p == '!' || *p == '^');
51630 + if (not)
51631 + ++p;
51632 +
51633 + c = *p++;
51634 + for (;;) {
51635 + unsigned char fn = (unsigned char)*n;
51636 +
51637 + if (c == '\0')
51638 + return 1;
51639 + else {
51640 + if (c == fn)
51641 + goto matched;
51642 + cold = c;
51643 + c = *p++;
51644 +
51645 + if (c == '-' && *p != ']') {
51646 + unsigned char cend = *p++;
51647 +
51648 + if (cend == '\0')
51649 + return 1;
51650 +
51651 + if (cold <= fn && fn <= cend)
51652 + goto matched;
51653 +
51654 + c = *p++;
51655 + }
51656 + }
51657 +
51658 + if (c == ']')
51659 + break;
51660 + }
51661 + if (!not)
51662 + return 1;
51663 + break;
51664 + matched:
51665 + while (c != ']') {
51666 + if (c == '\0')
51667 + return 1;
51668 +
51669 + c = *p++;
51670 + }
51671 + if (not)
51672 + return 1;
51673 + }
51674 + break;
51675 + default:
51676 + if (c != *n)
51677 + return 1;
51678 + }
51679 +
51680 + ++n;
51681 + }
51682 +
51683 + if (*n == '\0')
51684 + return 0;
51685 +
51686 + if (*n == '/')
51687 + return 0;
51688 +
51689 + return 1;
51690 +}
51691 +
51692 +static struct acl_object_label *
51693 +chk_glob_label(struct acl_object_label *globbed,
51694 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
51695 +{
51696 + struct acl_object_label *tmp;
51697 +
51698 + if (*path == NULL)
51699 + *path = gr_to_filename_nolock(dentry, mnt);
51700 +
51701 + tmp = globbed;
51702 +
51703 + while (tmp) {
51704 + if (!glob_match(tmp->filename, *path))
51705 + return tmp;
51706 + tmp = tmp->next;
51707 + }
51708 +
51709 + return NULL;
51710 +}
51711 +
51712 +static struct acl_object_label *
51713 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51714 + const ino_t curr_ino, const dev_t curr_dev,
51715 + const struct acl_subject_label *subj, char **path, const int checkglob)
51716 +{
51717 + struct acl_subject_label *tmpsubj;
51718 + struct acl_object_label *retval;
51719 + struct acl_object_label *retval2;
51720 +
51721 + tmpsubj = (struct acl_subject_label *) subj;
51722 + read_lock(&gr_inode_lock);
51723 + do {
51724 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
51725 + if (retval) {
51726 + if (checkglob && retval->globbed) {
51727 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
51728 + if (retval2)
51729 + retval = retval2;
51730 + }
51731 + break;
51732 + }
51733 + } while ((tmpsubj = tmpsubj->parent_subject));
51734 + read_unlock(&gr_inode_lock);
51735 +
51736 + return retval;
51737 +}
51738 +
51739 +static __inline__ struct acl_object_label *
51740 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
51741 + struct dentry *curr_dentry,
51742 + const struct acl_subject_label *subj, char **path, const int checkglob)
51743 +{
51744 + int newglob = checkglob;
51745 + ino_t inode;
51746 + dev_t device;
51747 +
51748 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
51749 + as we don't want a / * rule to match instead of the / object
51750 + don't do this for create lookups that call this function though, since they're looking up
51751 + on the parent and thus need globbing checks on all paths
51752 + */
51753 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
51754 + newglob = GR_NO_GLOB;
51755 +
51756 + spin_lock(&curr_dentry->d_lock);
51757 + inode = curr_dentry->d_inode->i_ino;
51758 + device = __get_dev(curr_dentry);
51759 + spin_unlock(&curr_dentry->d_lock);
51760 +
51761 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
51762 +}
51763 +
51764 +static struct acl_object_label *
51765 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51766 + const struct acl_subject_label *subj, char *path, const int checkglob)
51767 +{
51768 + struct dentry *dentry = (struct dentry *) l_dentry;
51769 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51770 + struct acl_object_label *retval;
51771 + struct dentry *parent;
51772 +
51773 + write_seqlock(&rename_lock);
51774 + br_read_lock(vfsmount_lock);
51775 +
51776 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
51777 +#ifdef CONFIG_NET
51778 + mnt == sock_mnt ||
51779 +#endif
51780 +#ifdef CONFIG_HUGETLBFS
51781 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
51782 +#endif
51783 + /* ignore Eric Biederman */
51784 + IS_PRIVATE(l_dentry->d_inode))) {
51785 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
51786 + goto out;
51787 + }
51788 +
51789 + for (;;) {
51790 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51791 + break;
51792 +
51793 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51794 + if (mnt->mnt_parent == mnt)
51795 + break;
51796 +
51797 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51798 + if (retval != NULL)
51799 + goto out;
51800 +
51801 + dentry = mnt->mnt_mountpoint;
51802 + mnt = mnt->mnt_parent;
51803 + continue;
51804 + }
51805 +
51806 + parent = dentry->d_parent;
51807 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51808 + if (retval != NULL)
51809 + goto out;
51810 +
51811 + dentry = parent;
51812 + }
51813 +
51814 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
51815 +
51816 + /* real_root is pinned so we don't have to hold a reference */
51817 + if (retval == NULL)
51818 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
51819 +out:
51820 + br_read_unlock(vfsmount_lock);
51821 + write_sequnlock(&rename_lock);
51822 +
51823 + BUG_ON(retval == NULL);
51824 +
51825 + return retval;
51826 +}
51827 +
51828 +static __inline__ struct acl_object_label *
51829 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51830 + const struct acl_subject_label *subj)
51831 +{
51832 + char *path = NULL;
51833 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
51834 +}
51835 +
51836 +static __inline__ struct acl_object_label *
51837 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51838 + const struct acl_subject_label *subj)
51839 +{
51840 + char *path = NULL;
51841 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
51842 +}
51843 +
51844 +static __inline__ struct acl_object_label *
51845 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51846 + const struct acl_subject_label *subj, char *path)
51847 +{
51848 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
51849 +}
51850 +
51851 +static struct acl_subject_label *
51852 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
51853 + const struct acl_role_label *role)
51854 +{
51855 + struct dentry *dentry = (struct dentry *) l_dentry;
51856 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
51857 + struct acl_subject_label *retval;
51858 + struct dentry *parent;
51859 +
51860 + write_seqlock(&rename_lock);
51861 + br_read_lock(vfsmount_lock);
51862 +
51863 + for (;;) {
51864 + if (dentry == real_root.dentry && mnt == real_root.mnt)
51865 + break;
51866 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
51867 + if (mnt->mnt_parent == mnt)
51868 + break;
51869 +
51870 + spin_lock(&dentry->d_lock);
51871 + read_lock(&gr_inode_lock);
51872 + retval =
51873 + lookup_acl_subj_label(dentry->d_inode->i_ino,
51874 + __get_dev(dentry), role);
51875 + read_unlock(&gr_inode_lock);
51876 + spin_unlock(&dentry->d_lock);
51877 + if (retval != NULL)
51878 + goto out;
51879 +
51880 + dentry = mnt->mnt_mountpoint;
51881 + mnt = mnt->mnt_parent;
51882 + continue;
51883 + }
51884 +
51885 + spin_lock(&dentry->d_lock);
51886 + read_lock(&gr_inode_lock);
51887 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51888 + __get_dev(dentry), role);
51889 + read_unlock(&gr_inode_lock);
51890 + parent = dentry->d_parent;
51891 + spin_unlock(&dentry->d_lock);
51892 +
51893 + if (retval != NULL)
51894 + goto out;
51895 +
51896 + dentry = parent;
51897 + }
51898 +
51899 + spin_lock(&dentry->d_lock);
51900 + read_lock(&gr_inode_lock);
51901 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
51902 + __get_dev(dentry), role);
51903 + read_unlock(&gr_inode_lock);
51904 + spin_unlock(&dentry->d_lock);
51905 +
51906 + if (unlikely(retval == NULL)) {
51907 + /* real_root is pinned, we don't need to hold a reference */
51908 + read_lock(&gr_inode_lock);
51909 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
51910 + __get_dev(real_root.dentry), role);
51911 + read_unlock(&gr_inode_lock);
51912 + }
51913 +out:
51914 + br_read_unlock(vfsmount_lock);
51915 + write_sequnlock(&rename_lock);
51916 +
51917 + BUG_ON(retval == NULL);
51918 +
51919 + return retval;
51920 +}
51921 +
51922 +static void
51923 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
51924 +{
51925 + struct task_struct *task = current;
51926 + const struct cred *cred = current_cred();
51927 +
51928 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51929 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51930 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51931 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
51932 +
51933 + return;
51934 +}
51935 +
51936 +static void
51937 +gr_log_learn_sysctl(const char *path, const __u32 mode)
51938 +{
51939 + struct task_struct *task = current;
51940 + const struct cred *cred = current_cred();
51941 +
51942 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
51943 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51944 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51945 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
51946 +
51947 + return;
51948 +}
51949 +
51950 +static void
51951 +gr_log_learn_id_change(const char type, const unsigned int real,
51952 + const unsigned int effective, const unsigned int fs)
51953 +{
51954 + struct task_struct *task = current;
51955 + const struct cred *cred = current_cred();
51956 +
51957 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
51958 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
51959 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
51960 + type, real, effective, fs, &task->signal->saved_ip);
51961 +
51962 + return;
51963 +}
51964 +
51965 +__u32
51966 +gr_search_file(const struct dentry * dentry, const __u32 mode,
51967 + const struct vfsmount * mnt)
51968 +{
51969 + __u32 retval = mode;
51970 + struct acl_subject_label *curracl;
51971 + struct acl_object_label *currobj;
51972 +
51973 + if (unlikely(!(gr_status & GR_READY)))
51974 + return (mode & ~GR_AUDITS);
51975 +
51976 + curracl = current->acl;
51977 +
51978 + currobj = chk_obj_label(dentry, mnt, curracl);
51979 + retval = currobj->mode & mode;
51980 +
51981 + /* if we're opening a specified transfer file for writing
51982 + (e.g. /dev/initctl), then transfer our role to init
51983 + */
51984 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
51985 + current->role->roletype & GR_ROLE_PERSIST)) {
51986 + struct task_struct *task = init_pid_ns.child_reaper;
51987 +
51988 + if (task->role != current->role) {
51989 + task->acl_sp_role = 0;
51990 + task->acl_role_id = current->acl_role_id;
51991 + task->role = current->role;
51992 + rcu_read_lock();
51993 + read_lock(&grsec_exec_file_lock);
51994 + gr_apply_subject_to_task(task);
51995 + read_unlock(&grsec_exec_file_lock);
51996 + rcu_read_unlock();
51997 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
51998 + }
51999 + }
52000 +
52001 + if (unlikely
52002 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52003 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52004 + __u32 new_mode = mode;
52005 +
52006 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52007 +
52008 + retval = new_mode;
52009 +
52010 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52011 + new_mode |= GR_INHERIT;
52012 +
52013 + if (!(mode & GR_NOLEARN))
52014 + gr_log_learn(dentry, mnt, new_mode);
52015 + }
52016 +
52017 + return retval;
52018 +}
52019 +
52020 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52021 + const struct dentry *parent,
52022 + const struct vfsmount *mnt)
52023 +{
52024 + struct name_entry *match;
52025 + struct acl_object_label *matchpo;
52026 + struct acl_subject_label *curracl;
52027 + char *path;
52028 +
52029 + if (unlikely(!(gr_status & GR_READY)))
52030 + return NULL;
52031 +
52032 + preempt_disable();
52033 + path = gr_to_filename_rbac(new_dentry, mnt);
52034 + match = lookup_name_entry_create(path);
52035 +
52036 + curracl = current->acl;
52037 +
52038 + if (match) {
52039 + read_lock(&gr_inode_lock);
52040 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52041 + read_unlock(&gr_inode_lock);
52042 +
52043 + if (matchpo) {
52044 + preempt_enable();
52045 + return matchpo;
52046 + }
52047 + }
52048 +
52049 + // lookup parent
52050 +
52051 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52052 +
52053 + preempt_enable();
52054 + return matchpo;
52055 +}
52056 +
52057 +__u32
52058 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52059 + const struct vfsmount * mnt, const __u32 mode)
52060 +{
52061 + struct acl_object_label *matchpo;
52062 + __u32 retval;
52063 +
52064 + if (unlikely(!(gr_status & GR_READY)))
52065 + return (mode & ~GR_AUDITS);
52066 +
52067 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52068 +
52069 + retval = matchpo->mode & mode;
52070 +
52071 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52072 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52073 + __u32 new_mode = mode;
52074 +
52075 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52076 +
52077 + gr_log_learn(new_dentry, mnt, new_mode);
52078 + return new_mode;
52079 + }
52080 +
52081 + return retval;
52082 +}
52083 +
52084 +__u32
52085 +gr_check_link(const struct dentry * new_dentry,
52086 + const struct dentry * parent_dentry,
52087 + const struct vfsmount * parent_mnt,
52088 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52089 +{
52090 + struct acl_object_label *obj;
52091 + __u32 oldmode, newmode;
52092 + __u32 needmode;
52093 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52094 + GR_DELETE | GR_INHERIT;
52095 +
52096 + if (unlikely(!(gr_status & GR_READY)))
52097 + return (GR_CREATE | GR_LINK);
52098 +
52099 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52100 + oldmode = obj->mode;
52101 +
52102 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52103 + newmode = obj->mode;
52104 +
52105 + needmode = newmode & checkmodes;
52106 +
52107 + // old name for hardlink must have at least the permissions of the new name
52108 + if ((oldmode & needmode) != needmode)
52109 + goto bad;
52110 +
52111 + // if old name had restrictions/auditing, make sure the new name does as well
52112 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52113 +
52114 + // don't allow hardlinking of suid/sgid files without permission
52115 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52116 + needmode |= GR_SETID;
52117 +
52118 + if ((newmode & needmode) != needmode)
52119 + goto bad;
52120 +
52121 + // enforce minimum permissions
52122 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52123 + return newmode;
52124 +bad:
52125 + needmode = oldmode;
52126 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52127 + needmode |= GR_SETID;
52128 +
52129 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52130 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52131 + return (GR_CREATE | GR_LINK);
52132 + } else if (newmode & GR_SUPPRESS)
52133 + return GR_SUPPRESS;
52134 + else
52135 + return 0;
52136 +}
52137 +
52138 +int
52139 +gr_check_hidden_task(const struct task_struct *task)
52140 +{
52141 + if (unlikely(!(gr_status & GR_READY)))
52142 + return 0;
52143 +
52144 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52145 + return 1;
52146 +
52147 + return 0;
52148 +}
52149 +
52150 +int
52151 +gr_check_protected_task(const struct task_struct *task)
52152 +{
52153 + if (unlikely(!(gr_status & GR_READY) || !task))
52154 + return 0;
52155 +
52156 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52157 + task->acl != current->acl)
52158 + return 1;
52159 +
52160 + return 0;
52161 +}
52162 +
52163 +int
52164 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52165 +{
52166 + struct task_struct *p;
52167 + int ret = 0;
52168 +
52169 + if (unlikely(!(gr_status & GR_READY) || !pid))
52170 + return ret;
52171 +
52172 + read_lock(&tasklist_lock);
52173 + do_each_pid_task(pid, type, p) {
52174 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52175 + p->acl != current->acl) {
52176 + ret = 1;
52177 + goto out;
52178 + }
52179 + } while_each_pid_task(pid, type, p);
52180 +out:
52181 + read_unlock(&tasklist_lock);
52182 +
52183 + return ret;
52184 +}
52185 +
52186 +void
52187 +gr_copy_label(struct task_struct *tsk)
52188 +{
52189 + /* plain copying of fields is already done by dup_task_struct */
52190 + tsk->signal->used_accept = 0;
52191 + tsk->acl_sp_role = 0;
52192 + //tsk->acl_role_id = current->acl_role_id;
52193 + //tsk->acl = current->acl;
52194 + //tsk->role = current->role;
52195 + tsk->signal->curr_ip = current->signal->curr_ip;
52196 + tsk->signal->saved_ip = current->signal->saved_ip;
52197 + if (current->exec_file)
52198 + get_file(current->exec_file);
52199 + //tsk->exec_file = current->exec_file;
52200 + //tsk->is_writable = current->is_writable;
52201 + if (unlikely(current->signal->used_accept)) {
52202 + current->signal->curr_ip = 0;
52203 + current->signal->saved_ip = 0;
52204 + }
52205 +
52206 + return;
52207 +}
52208 +
52209 +static void
52210 +gr_set_proc_res(struct task_struct *task)
52211 +{
52212 + struct acl_subject_label *proc;
52213 + unsigned short i;
52214 +
52215 + proc = task->acl;
52216 +
52217 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52218 + return;
52219 +
52220 + for (i = 0; i < RLIM_NLIMITS; i++) {
52221 + if (!(proc->resmask & (1 << i)))
52222 + continue;
52223 +
52224 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52225 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52226 + }
52227 +
52228 + return;
52229 +}
52230 +
52231 +extern int __gr_process_user_ban(struct user_struct *user);
52232 +
52233 +int
52234 +gr_check_user_change(int real, int effective, int fs)
52235 +{
52236 + unsigned int i;
52237 + __u16 num;
52238 + uid_t *uidlist;
52239 + int curuid;
52240 + int realok = 0;
52241 + int effectiveok = 0;
52242 + int fsok = 0;
52243 +
52244 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52245 + struct user_struct *user;
52246 +
52247 + if (real == -1)
52248 + goto skipit;
52249 +
52250 + user = find_user(real);
52251 + if (user == NULL)
52252 + goto skipit;
52253 +
52254 + if (__gr_process_user_ban(user)) {
52255 + /* for find_user */
52256 + free_uid(user);
52257 + return 1;
52258 + }
52259 +
52260 + /* for find_user */
52261 + free_uid(user);
52262 +
52263 +skipit:
52264 +#endif
52265 +
52266 + if (unlikely(!(gr_status & GR_READY)))
52267 + return 0;
52268 +
52269 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52270 + gr_log_learn_id_change('u', real, effective, fs);
52271 +
52272 + num = current->acl->user_trans_num;
52273 + uidlist = current->acl->user_transitions;
52274 +
52275 + if (uidlist == NULL)
52276 + return 0;
52277 +
52278 + if (real == -1)
52279 + realok = 1;
52280 + if (effective == -1)
52281 + effectiveok = 1;
52282 + if (fs == -1)
52283 + fsok = 1;
52284 +
52285 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
52286 + for (i = 0; i < num; i++) {
52287 + curuid = (int)uidlist[i];
52288 + if (real == curuid)
52289 + realok = 1;
52290 + if (effective == curuid)
52291 + effectiveok = 1;
52292 + if (fs == curuid)
52293 + fsok = 1;
52294 + }
52295 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
52296 + for (i = 0; i < num; i++) {
52297 + curuid = (int)uidlist[i];
52298 + if (real == curuid)
52299 + break;
52300 + if (effective == curuid)
52301 + break;
52302 + if (fs == curuid)
52303 + break;
52304 + }
52305 + /* not in deny list */
52306 + if (i == num) {
52307 + realok = 1;
52308 + effectiveok = 1;
52309 + fsok = 1;
52310 + }
52311 + }
52312 +
52313 + if (realok && effectiveok && fsok)
52314 + return 0;
52315 + else {
52316 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52317 + return 1;
52318 + }
52319 +}
52320 +
52321 +int
52322 +gr_check_group_change(int real, int effective, int fs)
52323 +{
52324 + unsigned int i;
52325 + __u16 num;
52326 + gid_t *gidlist;
52327 + int curgid;
52328 + int realok = 0;
52329 + int effectiveok = 0;
52330 + int fsok = 0;
52331 +
52332 + if (unlikely(!(gr_status & GR_READY)))
52333 + return 0;
52334 +
52335 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52336 + gr_log_learn_id_change('g', real, effective, fs);
52337 +
52338 + num = current->acl->group_trans_num;
52339 + gidlist = current->acl->group_transitions;
52340 +
52341 + if (gidlist == NULL)
52342 + return 0;
52343 +
52344 + if (real == -1)
52345 + realok = 1;
52346 + if (effective == -1)
52347 + effectiveok = 1;
52348 + if (fs == -1)
52349 + fsok = 1;
52350 +
52351 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52352 + for (i = 0; i < num; i++) {
52353 + curgid = (int)gidlist[i];
52354 + if (real == curgid)
52355 + realok = 1;
52356 + if (effective == curgid)
52357 + effectiveok = 1;
52358 + if (fs == curgid)
52359 + fsok = 1;
52360 + }
52361 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52362 + for (i = 0; i < num; i++) {
52363 + curgid = (int)gidlist[i];
52364 + if (real == curgid)
52365 + break;
52366 + if (effective == curgid)
52367 + break;
52368 + if (fs == curgid)
52369 + break;
52370 + }
52371 + /* not in deny list */
52372 + if (i == num) {
52373 + realok = 1;
52374 + effectiveok = 1;
52375 + fsok = 1;
52376 + }
52377 + }
52378 +
52379 + if (realok && effectiveok && fsok)
52380 + return 0;
52381 + else {
52382 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52383 + return 1;
52384 + }
52385 +}
52386 +
52387 +extern int gr_acl_is_capable(const int cap);
52388 +
52389 +void
52390 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52391 +{
52392 + struct acl_role_label *role = task->role;
52393 + struct acl_subject_label *subj = NULL;
52394 + struct acl_object_label *obj;
52395 + struct file *filp;
52396 +
52397 + if (unlikely(!(gr_status & GR_READY)))
52398 + return;
52399 +
52400 + filp = task->exec_file;
52401 +
52402 + /* kernel process, we'll give them the kernel role */
52403 + if (unlikely(!filp)) {
52404 + task->role = kernel_role;
52405 + task->acl = kernel_role->root_label;
52406 + return;
52407 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52408 + role = lookup_acl_role_label(task, uid, gid);
52409 +
52410 + /* don't change the role if we're not a privileged process */
52411 + if (role && task->role != role &&
52412 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52413 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52414 + return;
52415 +
52416 + /* perform subject lookup in possibly new role
52417 + we can use this result below in the case where role == task->role
52418 + */
52419 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52420 +
52421 + /* if we changed uid/gid, but result in the same role
52422 + and are using inheritance, don't lose the inherited subject
52423 + if current subject is other than what normal lookup
52424 + would result in, we arrived via inheritance, don't
52425 + lose subject
52426 + */
52427 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52428 + (subj == task->acl)))
52429 + task->acl = subj;
52430 +
52431 + task->role = role;
52432 +
52433 + task->is_writable = 0;
52434 +
52435 + /* ignore additional mmap checks for processes that are writable
52436 + by the default ACL */
52437 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52438 + if (unlikely(obj->mode & GR_WRITE))
52439 + task->is_writable = 1;
52440 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52441 + if (unlikely(obj->mode & GR_WRITE))
52442 + task->is_writable = 1;
52443 +
52444 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52445 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52446 +#endif
52447 +
52448 + gr_set_proc_res(task);
52449 +
52450 + return;
52451 +}
52452 +
52453 +int
52454 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52455 + const int unsafe_flags)
52456 +{
52457 + struct task_struct *task = current;
52458 + struct acl_subject_label *newacl;
52459 + struct acl_object_label *obj;
52460 + __u32 retmode;
52461 +
52462 + if (unlikely(!(gr_status & GR_READY)))
52463 + return 0;
52464 +
52465 + newacl = chk_subj_label(dentry, mnt, task->role);
52466 +
52467 + task_lock(task);
52468 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52469 + !(task->role->roletype & GR_ROLE_GOD) &&
52470 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52471 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52472 + task_unlock(task);
52473 + if (unsafe_flags & LSM_UNSAFE_SHARE)
52474 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52475 + else
52476 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52477 + return -EACCES;
52478 + }
52479 + task_unlock(task);
52480 +
52481 + obj = chk_obj_label(dentry, mnt, task->acl);
52482 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52483 +
52484 + if (!(task->acl->mode & GR_INHERITLEARN) &&
52485 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52486 + if (obj->nested)
52487 + task->acl = obj->nested;
52488 + else
52489 + task->acl = newacl;
52490 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52491 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52492 +
52493 + task->is_writable = 0;
52494 +
52495 + /* ignore additional mmap checks for processes that are writable
52496 + by the default ACL */
52497 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
52498 + if (unlikely(obj->mode & GR_WRITE))
52499 + task->is_writable = 1;
52500 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
52501 + if (unlikely(obj->mode & GR_WRITE))
52502 + task->is_writable = 1;
52503 +
52504 + gr_set_proc_res(task);
52505 +
52506 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52507 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52508 +#endif
52509 + return 0;
52510 +}
52511 +
52512 +/* always called with valid inodev ptr */
52513 +static void
52514 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
52515 +{
52516 + struct acl_object_label *matchpo;
52517 + struct acl_subject_label *matchps;
52518 + struct acl_subject_label *subj;
52519 + struct acl_role_label *role;
52520 + unsigned int x;
52521 +
52522 + FOR_EACH_ROLE_START(role)
52523 + FOR_EACH_SUBJECT_START(role, subj, x)
52524 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
52525 + matchpo->mode |= GR_DELETED;
52526 + FOR_EACH_SUBJECT_END(subj,x)
52527 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52528 + if (subj->inode == ino && subj->device == dev)
52529 + subj->mode |= GR_DELETED;
52530 + FOR_EACH_NESTED_SUBJECT_END(subj)
52531 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
52532 + matchps->mode |= GR_DELETED;
52533 + FOR_EACH_ROLE_END(role)
52534 +
52535 + inodev->nentry->deleted = 1;
52536 +
52537 + return;
52538 +}
52539 +
52540 +void
52541 +gr_handle_delete(const ino_t ino, const dev_t dev)
52542 +{
52543 + struct inodev_entry *inodev;
52544 +
52545 + if (unlikely(!(gr_status & GR_READY)))
52546 + return;
52547 +
52548 + write_lock(&gr_inode_lock);
52549 + inodev = lookup_inodev_entry(ino, dev);
52550 + if (inodev != NULL)
52551 + do_handle_delete(inodev, ino, dev);
52552 + write_unlock(&gr_inode_lock);
52553 +
52554 + return;
52555 +}
52556 +
52557 +static void
52558 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
52559 + const ino_t newinode, const dev_t newdevice,
52560 + struct acl_subject_label *subj)
52561 +{
52562 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
52563 + struct acl_object_label *match;
52564 +
52565 + match = subj->obj_hash[index];
52566 +
52567 + while (match && (match->inode != oldinode ||
52568 + match->device != olddevice ||
52569 + !(match->mode & GR_DELETED)))
52570 + match = match->next;
52571 +
52572 + if (match && (match->inode == oldinode)
52573 + && (match->device == olddevice)
52574 + && (match->mode & GR_DELETED)) {
52575 + if (match->prev == NULL) {
52576 + subj->obj_hash[index] = match->next;
52577 + if (match->next != NULL)
52578 + match->next->prev = NULL;
52579 + } else {
52580 + match->prev->next = match->next;
52581 + if (match->next != NULL)
52582 + match->next->prev = match->prev;
52583 + }
52584 + match->prev = NULL;
52585 + match->next = NULL;
52586 + match->inode = newinode;
52587 + match->device = newdevice;
52588 + match->mode &= ~GR_DELETED;
52589 +
52590 + insert_acl_obj_label(match, subj);
52591 + }
52592 +
52593 + return;
52594 +}
52595 +
52596 +static void
52597 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
52598 + const ino_t newinode, const dev_t newdevice,
52599 + struct acl_role_label *role)
52600 +{
52601 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
52602 + struct acl_subject_label *match;
52603 +
52604 + match = role->subj_hash[index];
52605 +
52606 + while (match && (match->inode != oldinode ||
52607 + match->device != olddevice ||
52608 + !(match->mode & GR_DELETED)))
52609 + match = match->next;
52610 +
52611 + if (match && (match->inode == oldinode)
52612 + && (match->device == olddevice)
52613 + && (match->mode & GR_DELETED)) {
52614 + if (match->prev == NULL) {
52615 + role->subj_hash[index] = match->next;
52616 + if (match->next != NULL)
52617 + match->next->prev = NULL;
52618 + } else {
52619 + match->prev->next = match->next;
52620 + if (match->next != NULL)
52621 + match->next->prev = match->prev;
52622 + }
52623 + match->prev = NULL;
52624 + match->next = NULL;
52625 + match->inode = newinode;
52626 + match->device = newdevice;
52627 + match->mode &= ~GR_DELETED;
52628 +
52629 + insert_acl_subj_label(match, role);
52630 + }
52631 +
52632 + return;
52633 +}
52634 +
52635 +static void
52636 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
52637 + const ino_t newinode, const dev_t newdevice)
52638 +{
52639 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
52640 + struct inodev_entry *match;
52641 +
52642 + match = inodev_set.i_hash[index];
52643 +
52644 + while (match && (match->nentry->inode != oldinode ||
52645 + match->nentry->device != olddevice || !match->nentry->deleted))
52646 + match = match->next;
52647 +
52648 + if (match && (match->nentry->inode == oldinode)
52649 + && (match->nentry->device == olddevice) &&
52650 + match->nentry->deleted) {
52651 + if (match->prev == NULL) {
52652 + inodev_set.i_hash[index] = match->next;
52653 + if (match->next != NULL)
52654 + match->next->prev = NULL;
52655 + } else {
52656 + match->prev->next = match->next;
52657 + if (match->next != NULL)
52658 + match->next->prev = match->prev;
52659 + }
52660 + match->prev = NULL;
52661 + match->next = NULL;
52662 + match->nentry->inode = newinode;
52663 + match->nentry->device = newdevice;
52664 + match->nentry->deleted = 0;
52665 +
52666 + insert_inodev_entry(match);
52667 + }
52668 +
52669 + return;
52670 +}
52671 +
52672 +static void
52673 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
52674 +{
52675 + struct acl_subject_label *subj;
52676 + struct acl_role_label *role;
52677 + unsigned int x;
52678 +
52679 + FOR_EACH_ROLE_START(role)
52680 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
52681 +
52682 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
52683 + if ((subj->inode == ino) && (subj->device == dev)) {
52684 + subj->inode = ino;
52685 + subj->device = dev;
52686 + }
52687 + FOR_EACH_NESTED_SUBJECT_END(subj)
52688 + FOR_EACH_SUBJECT_START(role, subj, x)
52689 + update_acl_obj_label(matchn->inode, matchn->device,
52690 + ino, dev, subj);
52691 + FOR_EACH_SUBJECT_END(subj,x)
52692 + FOR_EACH_ROLE_END(role)
52693 +
52694 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
52695 +
52696 + return;
52697 +}
52698 +
52699 +static void
52700 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
52701 + const struct vfsmount *mnt)
52702 +{
52703 + ino_t ino = dentry->d_inode->i_ino;
52704 + dev_t dev = __get_dev(dentry);
52705 +
52706 + __do_handle_create(matchn, ino, dev);
52707 +
52708 + return;
52709 +}
52710 +
52711 +void
52712 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
52713 +{
52714 + struct name_entry *matchn;
52715 +
52716 + if (unlikely(!(gr_status & GR_READY)))
52717 + return;
52718 +
52719 + preempt_disable();
52720 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
52721 +
52722 + if (unlikely((unsigned long)matchn)) {
52723 + write_lock(&gr_inode_lock);
52724 + do_handle_create(matchn, dentry, mnt);
52725 + write_unlock(&gr_inode_lock);
52726 + }
52727 + preempt_enable();
52728 +
52729 + return;
52730 +}
52731 +
52732 +void
52733 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
52734 +{
52735 + struct name_entry *matchn;
52736 +
52737 + if (unlikely(!(gr_status & GR_READY)))
52738 + return;
52739 +
52740 + preempt_disable();
52741 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
52742 +
52743 + if (unlikely((unsigned long)matchn)) {
52744 + write_lock(&gr_inode_lock);
52745 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
52746 + write_unlock(&gr_inode_lock);
52747 + }
52748 + preempt_enable();
52749 +
52750 + return;
52751 +}
52752 +
52753 +void
52754 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
52755 + struct dentry *old_dentry,
52756 + struct dentry *new_dentry,
52757 + struct vfsmount *mnt, const __u8 replace)
52758 +{
52759 + struct name_entry *matchn;
52760 + struct inodev_entry *inodev;
52761 + struct inode *inode = new_dentry->d_inode;
52762 + ino_t old_ino = old_dentry->d_inode->i_ino;
52763 + dev_t old_dev = __get_dev(old_dentry);
52764 +
52765 + /* vfs_rename swaps the name and parent link for old_dentry and
52766 + new_dentry
52767 + at this point, old_dentry has the new name, parent link, and inode
52768 + for the renamed file
52769 + if a file is being replaced by a rename, new_dentry has the inode
52770 + and name for the replaced file
52771 + */
52772 +
52773 + if (unlikely(!(gr_status & GR_READY)))
52774 + return;
52775 +
52776 + preempt_disable();
52777 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
52778 +
52779 + /* we wouldn't have to check d_inode if it weren't for
52780 + NFS silly-renaming
52781 + */
52782 +
52783 + write_lock(&gr_inode_lock);
52784 + if (unlikely(replace && inode)) {
52785 + ino_t new_ino = inode->i_ino;
52786 + dev_t new_dev = __get_dev(new_dentry);
52787 +
52788 + inodev = lookup_inodev_entry(new_ino, new_dev);
52789 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
52790 + do_handle_delete(inodev, new_ino, new_dev);
52791 + }
52792 +
52793 + inodev = lookup_inodev_entry(old_ino, old_dev);
52794 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
52795 + do_handle_delete(inodev, old_ino, old_dev);
52796 +
52797 + if (unlikely((unsigned long)matchn))
52798 + do_handle_create(matchn, old_dentry, mnt);
52799 +
52800 + write_unlock(&gr_inode_lock);
52801 + preempt_enable();
52802 +
52803 + return;
52804 +}
52805 +
52806 +static int
52807 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
52808 + unsigned char **sum)
52809 +{
52810 + struct acl_role_label *r;
52811 + struct role_allowed_ip *ipp;
52812 + struct role_transition *trans;
52813 + unsigned int i;
52814 + int found = 0;
52815 + u32 curr_ip = current->signal->curr_ip;
52816 +
52817 + current->signal->saved_ip = curr_ip;
52818 +
52819 + /* check transition table */
52820 +
52821 + for (trans = current->role->transitions; trans; trans = trans->next) {
52822 + if (!strcmp(rolename, trans->rolename)) {
52823 + found = 1;
52824 + break;
52825 + }
52826 + }
52827 +
52828 + if (!found)
52829 + return 0;
52830 +
52831 + /* handle special roles that do not require authentication
52832 + and check ip */
52833 +
52834 + FOR_EACH_ROLE_START(r)
52835 + if (!strcmp(rolename, r->rolename) &&
52836 + (r->roletype & GR_ROLE_SPECIAL)) {
52837 + found = 0;
52838 + if (r->allowed_ips != NULL) {
52839 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
52840 + if ((ntohl(curr_ip) & ipp->netmask) ==
52841 + (ntohl(ipp->addr) & ipp->netmask))
52842 + found = 1;
52843 + }
52844 + } else
52845 + found = 2;
52846 + if (!found)
52847 + return 0;
52848 +
52849 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
52850 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
52851 + *salt = NULL;
52852 + *sum = NULL;
52853 + return 1;
52854 + }
52855 + }
52856 + FOR_EACH_ROLE_END(r)
52857 +
52858 + for (i = 0; i < num_sprole_pws; i++) {
52859 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
52860 + *salt = acl_special_roles[i]->salt;
52861 + *sum = acl_special_roles[i]->sum;
52862 + return 1;
52863 + }
52864 + }
52865 +
52866 + return 0;
52867 +}
52868 +
52869 +static void
52870 +assign_special_role(char *rolename)
52871 +{
52872 + struct acl_object_label *obj;
52873 + struct acl_role_label *r;
52874 + struct acl_role_label *assigned = NULL;
52875 + struct task_struct *tsk;
52876 + struct file *filp;
52877 +
52878 + FOR_EACH_ROLE_START(r)
52879 + if (!strcmp(rolename, r->rolename) &&
52880 + (r->roletype & GR_ROLE_SPECIAL)) {
52881 + assigned = r;
52882 + break;
52883 + }
52884 + FOR_EACH_ROLE_END(r)
52885 +
52886 + if (!assigned)
52887 + return;
52888 +
52889 + read_lock(&tasklist_lock);
52890 + read_lock(&grsec_exec_file_lock);
52891 +
52892 + tsk = current->real_parent;
52893 + if (tsk == NULL)
52894 + goto out_unlock;
52895 +
52896 + filp = tsk->exec_file;
52897 + if (filp == NULL)
52898 + goto out_unlock;
52899 +
52900 + tsk->is_writable = 0;
52901 +
52902 + tsk->acl_sp_role = 1;
52903 + tsk->acl_role_id = ++acl_sp_role_value;
52904 + tsk->role = assigned;
52905 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
52906 +
52907 + /* ignore additional mmap checks for processes that are writable
52908 + by the default ACL */
52909 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52910 + if (unlikely(obj->mode & GR_WRITE))
52911 + tsk->is_writable = 1;
52912 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
52913 + if (unlikely(obj->mode & GR_WRITE))
52914 + tsk->is_writable = 1;
52915 +
52916 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52917 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
52918 +#endif
52919 +
52920 +out_unlock:
52921 + read_unlock(&grsec_exec_file_lock);
52922 + read_unlock(&tasklist_lock);
52923 + return;
52924 +}
52925 +
52926 +int gr_check_secure_terminal(struct task_struct *task)
52927 +{
52928 + struct task_struct *p, *p2, *p3;
52929 + struct files_struct *files;
52930 + struct fdtable *fdt;
52931 + struct file *our_file = NULL, *file;
52932 + int i;
52933 +
52934 + if (task->signal->tty == NULL)
52935 + return 1;
52936 +
52937 + files = get_files_struct(task);
52938 + if (files != NULL) {
52939 + rcu_read_lock();
52940 + fdt = files_fdtable(files);
52941 + for (i=0; i < fdt->max_fds; i++) {
52942 + file = fcheck_files(files, i);
52943 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
52944 + get_file(file);
52945 + our_file = file;
52946 + }
52947 + }
52948 + rcu_read_unlock();
52949 + put_files_struct(files);
52950 + }
52951 +
52952 + if (our_file == NULL)
52953 + return 1;
52954 +
52955 + read_lock(&tasklist_lock);
52956 + do_each_thread(p2, p) {
52957 + files = get_files_struct(p);
52958 + if (files == NULL ||
52959 + (p->signal && p->signal->tty == task->signal->tty)) {
52960 + if (files != NULL)
52961 + put_files_struct(files);
52962 + continue;
52963 + }
52964 + rcu_read_lock();
52965 + fdt = files_fdtable(files);
52966 + for (i=0; i < fdt->max_fds; i++) {
52967 + file = fcheck_files(files, i);
52968 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
52969 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
52970 + p3 = task;
52971 + while (p3->pid > 0) {
52972 + if (p3 == p)
52973 + break;
52974 + p3 = p3->real_parent;
52975 + }
52976 + if (p3 == p)
52977 + break;
52978 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
52979 + gr_handle_alertkill(p);
52980 + rcu_read_unlock();
52981 + put_files_struct(files);
52982 + read_unlock(&tasklist_lock);
52983 + fput(our_file);
52984 + return 0;
52985 + }
52986 + }
52987 + rcu_read_unlock();
52988 + put_files_struct(files);
52989 + } while_each_thread(p2, p);
52990 + read_unlock(&tasklist_lock);
52991 +
52992 + fput(our_file);
52993 + return 1;
52994 +}
52995 +
52996 +ssize_t
52997 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
52998 +{
52999 + struct gr_arg_wrapper uwrap;
53000 + unsigned char *sprole_salt = NULL;
53001 + unsigned char *sprole_sum = NULL;
53002 + int error = sizeof (struct gr_arg_wrapper);
53003 + int error2 = 0;
53004 +
53005 + mutex_lock(&gr_dev_mutex);
53006 +
53007 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53008 + error = -EPERM;
53009 + goto out;
53010 + }
53011 +
53012 + if (count != sizeof (struct gr_arg_wrapper)) {
53013 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53014 + error = -EINVAL;
53015 + goto out;
53016 + }
53017 +
53018 +
53019 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53020 + gr_auth_expires = 0;
53021 + gr_auth_attempts = 0;
53022 + }
53023 +
53024 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53025 + error = -EFAULT;
53026 + goto out;
53027 + }
53028 +
53029 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53030 + error = -EINVAL;
53031 + goto out;
53032 + }
53033 +
53034 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53035 + error = -EFAULT;
53036 + goto out;
53037 + }
53038 +
53039 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53040 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53041 + time_after(gr_auth_expires, get_seconds())) {
53042 + error = -EBUSY;
53043 + goto out;
53044 + }
53045 +
53046 + /* if non-root trying to do anything other than use a special role,
53047 + do not attempt authentication, do not count towards authentication
53048 + locking
53049 + */
53050 +
53051 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53052 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53053 + current_uid()) {
53054 + error = -EPERM;
53055 + goto out;
53056 + }
53057 +
53058 + /* ensure pw and special role name are null terminated */
53059 +
53060 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53061 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53062 +
53063 + /* Okay.
53064 + * We have our enough of the argument structure..(we have yet
53065 + * to copy_from_user the tables themselves) . Copy the tables
53066 + * only if we need them, i.e. for loading operations. */
53067 +
53068 + switch (gr_usermode->mode) {
53069 + case GR_STATUS:
53070 + if (gr_status & GR_READY) {
53071 + error = 1;
53072 + if (!gr_check_secure_terminal(current))
53073 + error = 3;
53074 + } else
53075 + error = 2;
53076 + goto out;
53077 + case GR_SHUTDOWN:
53078 + if ((gr_status & GR_READY)
53079 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53080 + pax_open_kernel();
53081 + gr_status &= ~GR_READY;
53082 + pax_close_kernel();
53083 +
53084 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53085 + free_variables();
53086 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53087 + memset(gr_system_salt, 0, GR_SALT_LEN);
53088 + memset(gr_system_sum, 0, GR_SHA_LEN);
53089 + } else if (gr_status & GR_READY) {
53090 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53091 + error = -EPERM;
53092 + } else {
53093 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53094 + error = -EAGAIN;
53095 + }
53096 + break;
53097 + case GR_ENABLE:
53098 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53099 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53100 + else {
53101 + if (gr_status & GR_READY)
53102 + error = -EAGAIN;
53103 + else
53104 + error = error2;
53105 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53106 + }
53107 + break;
53108 + case GR_RELOAD:
53109 + if (!(gr_status & GR_READY)) {
53110 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53111 + error = -EAGAIN;
53112 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53113 + preempt_disable();
53114 +
53115 + pax_open_kernel();
53116 + gr_status &= ~GR_READY;
53117 + pax_close_kernel();
53118 +
53119 + free_variables();
53120 + if (!(error2 = gracl_init(gr_usermode))) {
53121 + preempt_enable();
53122 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53123 + } else {
53124 + preempt_enable();
53125 + error = error2;
53126 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53127 + }
53128 + } else {
53129 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53130 + error = -EPERM;
53131 + }
53132 + break;
53133 + case GR_SEGVMOD:
53134 + if (unlikely(!(gr_status & GR_READY))) {
53135 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53136 + error = -EAGAIN;
53137 + break;
53138 + }
53139 +
53140 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53141 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53142 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53143 + struct acl_subject_label *segvacl;
53144 + segvacl =
53145 + lookup_acl_subj_label(gr_usermode->segv_inode,
53146 + gr_usermode->segv_device,
53147 + current->role);
53148 + if (segvacl) {
53149 + segvacl->crashes = 0;
53150 + segvacl->expires = 0;
53151 + }
53152 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53153 + gr_remove_uid(gr_usermode->segv_uid);
53154 + }
53155 + } else {
53156 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53157 + error = -EPERM;
53158 + }
53159 + break;
53160 + case GR_SPROLE:
53161 + case GR_SPROLEPAM:
53162 + if (unlikely(!(gr_status & GR_READY))) {
53163 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53164 + error = -EAGAIN;
53165 + break;
53166 + }
53167 +
53168 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53169 + current->role->expires = 0;
53170 + current->role->auth_attempts = 0;
53171 + }
53172 +
53173 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53174 + time_after(current->role->expires, get_seconds())) {
53175 + error = -EBUSY;
53176 + goto out;
53177 + }
53178 +
53179 + if (lookup_special_role_auth
53180 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53181 + && ((!sprole_salt && !sprole_sum)
53182 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53183 + char *p = "";
53184 + assign_special_role(gr_usermode->sp_role);
53185 + read_lock(&tasklist_lock);
53186 + if (current->real_parent)
53187 + p = current->real_parent->role->rolename;
53188 + read_unlock(&tasklist_lock);
53189 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53190 + p, acl_sp_role_value);
53191 + } else {
53192 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53193 + error = -EPERM;
53194 + if(!(current->role->auth_attempts++))
53195 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53196 +
53197 + goto out;
53198 + }
53199 + break;
53200 + case GR_UNSPROLE:
53201 + if (unlikely(!(gr_status & GR_READY))) {
53202 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53203 + error = -EAGAIN;
53204 + break;
53205 + }
53206 +
53207 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53208 + char *p = "";
53209 + int i = 0;
53210 +
53211 + read_lock(&tasklist_lock);
53212 + if (current->real_parent) {
53213 + p = current->real_parent->role->rolename;
53214 + i = current->real_parent->acl_role_id;
53215 + }
53216 + read_unlock(&tasklist_lock);
53217 +
53218 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53219 + gr_set_acls(1);
53220 + } else {
53221 + error = -EPERM;
53222 + goto out;
53223 + }
53224 + break;
53225 + default:
53226 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53227 + error = -EINVAL;
53228 + break;
53229 + }
53230 +
53231 + if (error != -EPERM)
53232 + goto out;
53233 +
53234 + if(!(gr_auth_attempts++))
53235 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53236 +
53237 + out:
53238 + mutex_unlock(&gr_dev_mutex);
53239 + return error;
53240 +}
53241 +
53242 +/* must be called with
53243 + rcu_read_lock();
53244 + read_lock(&tasklist_lock);
53245 + read_lock(&grsec_exec_file_lock);
53246 +*/
53247 +int gr_apply_subject_to_task(struct task_struct *task)
53248 +{
53249 + struct acl_object_label *obj;
53250 + char *tmpname;
53251 + struct acl_subject_label *tmpsubj;
53252 + struct file *filp;
53253 + struct name_entry *nmatch;
53254 +
53255 + filp = task->exec_file;
53256 + if (filp == NULL)
53257 + return 0;
53258 +
53259 + /* the following is to apply the correct subject
53260 + on binaries running when the RBAC system
53261 + is enabled, when the binaries have been
53262 + replaced or deleted since their execution
53263 + -----
53264 + when the RBAC system starts, the inode/dev
53265 + from exec_file will be one the RBAC system
53266 + is unaware of. It only knows the inode/dev
53267 + of the present file on disk, or the absence
53268 + of it.
53269 + */
53270 + preempt_disable();
53271 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53272 +
53273 + nmatch = lookup_name_entry(tmpname);
53274 + preempt_enable();
53275 + tmpsubj = NULL;
53276 + if (nmatch) {
53277 + if (nmatch->deleted)
53278 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53279 + else
53280 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53281 + if (tmpsubj != NULL)
53282 + task->acl = tmpsubj;
53283 + }
53284 + if (tmpsubj == NULL)
53285 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53286 + task->role);
53287 + if (task->acl) {
53288 + task->is_writable = 0;
53289 + /* ignore additional mmap checks for processes that are writable
53290 + by the default ACL */
53291 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53292 + if (unlikely(obj->mode & GR_WRITE))
53293 + task->is_writable = 1;
53294 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53295 + if (unlikely(obj->mode & GR_WRITE))
53296 + task->is_writable = 1;
53297 +
53298 + gr_set_proc_res(task);
53299 +
53300 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53301 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53302 +#endif
53303 + } else {
53304 + return 1;
53305 + }
53306 +
53307 + return 0;
53308 +}
53309 +
53310 +int
53311 +gr_set_acls(const int type)
53312 +{
53313 + struct task_struct *task, *task2;
53314 + struct acl_role_label *role = current->role;
53315 + __u16 acl_role_id = current->acl_role_id;
53316 + const struct cred *cred;
53317 + int ret;
53318 +
53319 + rcu_read_lock();
53320 + read_lock(&tasklist_lock);
53321 + read_lock(&grsec_exec_file_lock);
53322 + do_each_thread(task2, task) {
53323 + /* check to see if we're called from the exit handler,
53324 + if so, only replace ACLs that have inherited the admin
53325 + ACL */
53326 +
53327 + if (type && (task->role != role ||
53328 + task->acl_role_id != acl_role_id))
53329 + continue;
53330 +
53331 + task->acl_role_id = 0;
53332 + task->acl_sp_role = 0;
53333 +
53334 + if (task->exec_file) {
53335 + cred = __task_cred(task);
53336 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53337 + ret = gr_apply_subject_to_task(task);
53338 + if (ret) {
53339 + read_unlock(&grsec_exec_file_lock);
53340 + read_unlock(&tasklist_lock);
53341 + rcu_read_unlock();
53342 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53343 + return ret;
53344 + }
53345 + } else {
53346 + // it's a kernel process
53347 + task->role = kernel_role;
53348 + task->acl = kernel_role->root_label;
53349 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53350 + task->acl->mode &= ~GR_PROCFIND;
53351 +#endif
53352 + }
53353 + } while_each_thread(task2, task);
53354 + read_unlock(&grsec_exec_file_lock);
53355 + read_unlock(&tasklist_lock);
53356 + rcu_read_unlock();
53357 +
53358 + return 0;
53359 +}
53360 +
53361 +void
53362 +gr_learn_resource(const struct task_struct *task,
53363 + const int res, const unsigned long wanted, const int gt)
53364 +{
53365 + struct acl_subject_label *acl;
53366 + const struct cred *cred;
53367 +
53368 + if (unlikely((gr_status & GR_READY) &&
53369 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53370 + goto skip_reslog;
53371 +
53372 +#ifdef CONFIG_GRKERNSEC_RESLOG
53373 + gr_log_resource(task, res, wanted, gt);
53374 +#endif
53375 + skip_reslog:
53376 +
53377 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53378 + return;
53379 +
53380 + acl = task->acl;
53381 +
53382 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53383 + !(acl->resmask & (1 << (unsigned short) res))))
53384 + return;
53385 +
53386 + if (wanted >= acl->res[res].rlim_cur) {
53387 + unsigned long res_add;
53388 +
53389 + res_add = wanted;
53390 + switch (res) {
53391 + case RLIMIT_CPU:
53392 + res_add += GR_RLIM_CPU_BUMP;
53393 + break;
53394 + case RLIMIT_FSIZE:
53395 + res_add += GR_RLIM_FSIZE_BUMP;
53396 + break;
53397 + case RLIMIT_DATA:
53398 + res_add += GR_RLIM_DATA_BUMP;
53399 + break;
53400 + case RLIMIT_STACK:
53401 + res_add += GR_RLIM_STACK_BUMP;
53402 + break;
53403 + case RLIMIT_CORE:
53404 + res_add += GR_RLIM_CORE_BUMP;
53405 + break;
53406 + case RLIMIT_RSS:
53407 + res_add += GR_RLIM_RSS_BUMP;
53408 + break;
53409 + case RLIMIT_NPROC:
53410 + res_add += GR_RLIM_NPROC_BUMP;
53411 + break;
53412 + case RLIMIT_NOFILE:
53413 + res_add += GR_RLIM_NOFILE_BUMP;
53414 + break;
53415 + case RLIMIT_MEMLOCK:
53416 + res_add += GR_RLIM_MEMLOCK_BUMP;
53417 + break;
53418 + case RLIMIT_AS:
53419 + res_add += GR_RLIM_AS_BUMP;
53420 + break;
53421 + case RLIMIT_LOCKS:
53422 + res_add += GR_RLIM_LOCKS_BUMP;
53423 + break;
53424 + case RLIMIT_SIGPENDING:
53425 + res_add += GR_RLIM_SIGPENDING_BUMP;
53426 + break;
53427 + case RLIMIT_MSGQUEUE:
53428 + res_add += GR_RLIM_MSGQUEUE_BUMP;
53429 + break;
53430 + case RLIMIT_NICE:
53431 + res_add += GR_RLIM_NICE_BUMP;
53432 + break;
53433 + case RLIMIT_RTPRIO:
53434 + res_add += GR_RLIM_RTPRIO_BUMP;
53435 + break;
53436 + case RLIMIT_RTTIME:
53437 + res_add += GR_RLIM_RTTIME_BUMP;
53438 + break;
53439 + }
53440 +
53441 + acl->res[res].rlim_cur = res_add;
53442 +
53443 + if (wanted > acl->res[res].rlim_max)
53444 + acl->res[res].rlim_max = res_add;
53445 +
53446 + /* only log the subject filename, since resource logging is supported for
53447 + single-subject learning only */
53448 + rcu_read_lock();
53449 + cred = __task_cred(task);
53450 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53451 + task->role->roletype, cred->uid, cred->gid, acl->filename,
53452 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53453 + "", (unsigned long) res, &task->signal->saved_ip);
53454 + rcu_read_unlock();
53455 + }
53456 +
53457 + return;
53458 +}
53459 +
53460 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53461 +void
53462 +pax_set_initial_flags(struct linux_binprm *bprm)
53463 +{
53464 + struct task_struct *task = current;
53465 + struct acl_subject_label *proc;
53466 + unsigned long flags;
53467 +
53468 + if (unlikely(!(gr_status & GR_READY)))
53469 + return;
53470 +
53471 + flags = pax_get_flags(task);
53472 +
53473 + proc = task->acl;
53474 +
53475 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53476 + flags &= ~MF_PAX_PAGEEXEC;
53477 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53478 + flags &= ~MF_PAX_SEGMEXEC;
53479 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53480 + flags &= ~MF_PAX_RANDMMAP;
53481 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53482 + flags &= ~MF_PAX_EMUTRAMP;
53483 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53484 + flags &= ~MF_PAX_MPROTECT;
53485 +
53486 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53487 + flags |= MF_PAX_PAGEEXEC;
53488 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53489 + flags |= MF_PAX_SEGMEXEC;
53490 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53491 + flags |= MF_PAX_RANDMMAP;
53492 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
53493 + flags |= MF_PAX_EMUTRAMP;
53494 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
53495 + flags |= MF_PAX_MPROTECT;
53496 +
53497 + pax_set_flags(task, flags);
53498 +
53499 + return;
53500 +}
53501 +#endif
53502 +
53503 +#ifdef CONFIG_SYSCTL
53504 +/* Eric Biederman likes breaking userland ABI and every inode-based security
53505 + system to save 35kb of memory */
53506 +
53507 +/* we modify the passed in filename, but adjust it back before returning */
53508 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
53509 +{
53510 + struct name_entry *nmatch;
53511 + char *p, *lastp = NULL;
53512 + struct acl_object_label *obj = NULL, *tmp;
53513 + struct acl_subject_label *tmpsubj;
53514 + char c = '\0';
53515 +
53516 + read_lock(&gr_inode_lock);
53517 +
53518 + p = name + len - 1;
53519 + do {
53520 + nmatch = lookup_name_entry(name);
53521 + if (lastp != NULL)
53522 + *lastp = c;
53523 +
53524 + if (nmatch == NULL)
53525 + goto next_component;
53526 + tmpsubj = current->acl;
53527 + do {
53528 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
53529 + if (obj != NULL) {
53530 + tmp = obj->globbed;
53531 + while (tmp) {
53532 + if (!glob_match(tmp->filename, name)) {
53533 + obj = tmp;
53534 + goto found_obj;
53535 + }
53536 + tmp = tmp->next;
53537 + }
53538 + goto found_obj;
53539 + }
53540 + } while ((tmpsubj = tmpsubj->parent_subject));
53541 +next_component:
53542 + /* end case */
53543 + if (p == name)
53544 + break;
53545 +
53546 + while (*p != '/')
53547 + p--;
53548 + if (p == name)
53549 + lastp = p + 1;
53550 + else {
53551 + lastp = p;
53552 + p--;
53553 + }
53554 + c = *lastp;
53555 + *lastp = '\0';
53556 + } while (1);
53557 +found_obj:
53558 + read_unlock(&gr_inode_lock);
53559 + /* obj returned will always be non-null */
53560 + return obj;
53561 +}
53562 +
53563 +/* returns 0 when allowing, non-zero on error
53564 + op of 0 is used for readdir, so we don't log the names of hidden files
53565 +*/
53566 +__u32
53567 +gr_handle_sysctl(const struct ctl_table *table, const int op)
53568 +{
53569 + struct ctl_table *tmp;
53570 + const char *proc_sys = "/proc/sys";
53571 + char *path;
53572 + struct acl_object_label *obj;
53573 + unsigned short len = 0, pos = 0, depth = 0, i;
53574 + __u32 err = 0;
53575 + __u32 mode = 0;
53576 +
53577 + if (unlikely(!(gr_status & GR_READY)))
53578 + return 0;
53579 +
53580 + /* for now, ignore operations on non-sysctl entries if it's not a
53581 + readdir*/
53582 + if (table->child != NULL && op != 0)
53583 + return 0;
53584 +
53585 + mode |= GR_FIND;
53586 + /* it's only a read if it's an entry, read on dirs is for readdir */
53587 + if (op & MAY_READ)
53588 + mode |= GR_READ;
53589 + if (op & MAY_WRITE)
53590 + mode |= GR_WRITE;
53591 +
53592 + preempt_disable();
53593 +
53594 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
53595 +
53596 + /* it's only a read/write if it's an actual entry, not a dir
53597 + (which are opened for readdir)
53598 + */
53599 +
53600 + /* convert the requested sysctl entry into a pathname */
53601 +
53602 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53603 + len += strlen(tmp->procname);
53604 + len++;
53605 + depth++;
53606 + }
53607 +
53608 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
53609 + /* deny */
53610 + goto out;
53611 + }
53612 +
53613 + memset(path, 0, PAGE_SIZE);
53614 +
53615 + memcpy(path, proc_sys, strlen(proc_sys));
53616 +
53617 + pos += strlen(proc_sys);
53618 +
53619 + for (; depth > 0; depth--) {
53620 + path[pos] = '/';
53621 + pos++;
53622 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
53623 + if (depth == i) {
53624 + memcpy(path + pos, tmp->procname,
53625 + strlen(tmp->procname));
53626 + pos += strlen(tmp->procname);
53627 + }
53628 + i++;
53629 + }
53630 + }
53631 +
53632 + obj = gr_lookup_by_name(path, pos);
53633 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
53634 +
53635 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
53636 + ((err & mode) != mode))) {
53637 + __u32 new_mode = mode;
53638 +
53639 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53640 +
53641 + err = 0;
53642 + gr_log_learn_sysctl(path, new_mode);
53643 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
53644 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
53645 + err = -ENOENT;
53646 + } else if (!(err & GR_FIND)) {
53647 + err = -ENOENT;
53648 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
53649 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
53650 + path, (mode & GR_READ) ? " reading" : "",
53651 + (mode & GR_WRITE) ? " writing" : "");
53652 + err = -EACCES;
53653 + } else if ((err & mode) != mode) {
53654 + err = -EACCES;
53655 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
53656 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
53657 + path, (mode & GR_READ) ? " reading" : "",
53658 + (mode & GR_WRITE) ? " writing" : "");
53659 + err = 0;
53660 + } else
53661 + err = 0;
53662 +
53663 + out:
53664 + preempt_enable();
53665 +
53666 + return err;
53667 +}
53668 +#endif
53669 +
53670 +int
53671 +gr_handle_proc_ptrace(struct task_struct *task)
53672 +{
53673 + struct file *filp;
53674 + struct task_struct *tmp = task;
53675 + struct task_struct *curtemp = current;
53676 + __u32 retmode;
53677 +
53678 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53679 + if (unlikely(!(gr_status & GR_READY)))
53680 + return 0;
53681 +#endif
53682 +
53683 + read_lock(&tasklist_lock);
53684 + read_lock(&grsec_exec_file_lock);
53685 + filp = task->exec_file;
53686 +
53687 + while (tmp->pid > 0) {
53688 + if (tmp == curtemp)
53689 + break;
53690 + tmp = tmp->real_parent;
53691 + }
53692 +
53693 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53694 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
53695 + read_unlock(&grsec_exec_file_lock);
53696 + read_unlock(&tasklist_lock);
53697 + return 1;
53698 + }
53699 +
53700 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53701 + if (!(gr_status & GR_READY)) {
53702 + read_unlock(&grsec_exec_file_lock);
53703 + read_unlock(&tasklist_lock);
53704 + return 0;
53705 + }
53706 +#endif
53707 +
53708 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
53709 + read_unlock(&grsec_exec_file_lock);
53710 + read_unlock(&tasklist_lock);
53711 +
53712 + if (retmode & GR_NOPTRACE)
53713 + return 1;
53714 +
53715 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
53716 + && (current->acl != task->acl || (current->acl != current->role->root_label
53717 + && current->pid != task->pid)))
53718 + return 1;
53719 +
53720 + return 0;
53721 +}
53722 +
53723 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
53724 +{
53725 + if (unlikely(!(gr_status & GR_READY)))
53726 + return;
53727 +
53728 + if (!(current->role->roletype & GR_ROLE_GOD))
53729 + return;
53730 +
53731 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
53732 + p->role->rolename, gr_task_roletype_to_char(p),
53733 + p->acl->filename);
53734 +}
53735 +
53736 +int
53737 +gr_handle_ptrace(struct task_struct *task, const long request)
53738 +{
53739 + struct task_struct *tmp = task;
53740 + struct task_struct *curtemp = current;
53741 + __u32 retmode;
53742 +
53743 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
53744 + if (unlikely(!(gr_status & GR_READY)))
53745 + return 0;
53746 +#endif
53747 +
53748 + read_lock(&tasklist_lock);
53749 + while (tmp->pid > 0) {
53750 + if (tmp == curtemp)
53751 + break;
53752 + tmp = tmp->real_parent;
53753 + }
53754 +
53755 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
53756 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
53757 + read_unlock(&tasklist_lock);
53758 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53759 + return 1;
53760 + }
53761 + read_unlock(&tasklist_lock);
53762 +
53763 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53764 + if (!(gr_status & GR_READY))
53765 + return 0;
53766 +#endif
53767 +
53768 + read_lock(&grsec_exec_file_lock);
53769 + if (unlikely(!task->exec_file)) {
53770 + read_unlock(&grsec_exec_file_lock);
53771 + return 0;
53772 + }
53773 +
53774 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
53775 + read_unlock(&grsec_exec_file_lock);
53776 +
53777 + if (retmode & GR_NOPTRACE) {
53778 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53779 + return 1;
53780 + }
53781 +
53782 + if (retmode & GR_PTRACERD) {
53783 + switch (request) {
53784 + case PTRACE_SEIZE:
53785 + case PTRACE_POKETEXT:
53786 + case PTRACE_POKEDATA:
53787 + case PTRACE_POKEUSR:
53788 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
53789 + case PTRACE_SETREGS:
53790 + case PTRACE_SETFPREGS:
53791 +#endif
53792 +#ifdef CONFIG_X86
53793 + case PTRACE_SETFPXREGS:
53794 +#endif
53795 +#ifdef CONFIG_ALTIVEC
53796 + case PTRACE_SETVRREGS:
53797 +#endif
53798 + return 1;
53799 + default:
53800 + return 0;
53801 + }
53802 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
53803 + !(current->role->roletype & GR_ROLE_GOD) &&
53804 + (current->acl != task->acl)) {
53805 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
53806 + return 1;
53807 + }
53808 +
53809 + return 0;
53810 +}
53811 +
53812 +static int is_writable_mmap(const struct file *filp)
53813 +{
53814 + struct task_struct *task = current;
53815 + struct acl_object_label *obj, *obj2;
53816 +
53817 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
53818 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
53819 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53820 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
53821 + task->role->root_label);
53822 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
53823 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
53824 + return 1;
53825 + }
53826 + }
53827 + return 0;
53828 +}
53829 +
53830 +int
53831 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
53832 +{
53833 + __u32 mode;
53834 +
53835 + if (unlikely(!file || !(prot & PROT_EXEC)))
53836 + return 1;
53837 +
53838 + if (is_writable_mmap(file))
53839 + return 0;
53840 +
53841 + mode =
53842 + gr_search_file(file->f_path.dentry,
53843 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53844 + file->f_path.mnt);
53845 +
53846 + if (!gr_tpe_allow(file))
53847 + return 0;
53848 +
53849 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53850 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53851 + return 0;
53852 + } else if (unlikely(!(mode & GR_EXEC))) {
53853 + return 0;
53854 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53855 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53856 + return 1;
53857 + }
53858 +
53859 + return 1;
53860 +}
53861 +
53862 +int
53863 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53864 +{
53865 + __u32 mode;
53866 +
53867 + if (unlikely(!file || !(prot & PROT_EXEC)))
53868 + return 1;
53869 +
53870 + if (is_writable_mmap(file))
53871 + return 0;
53872 +
53873 + mode =
53874 + gr_search_file(file->f_path.dentry,
53875 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
53876 + file->f_path.mnt);
53877 +
53878 + if (!gr_tpe_allow(file))
53879 + return 0;
53880 +
53881 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
53882 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53883 + return 0;
53884 + } else if (unlikely(!(mode & GR_EXEC))) {
53885 + return 0;
53886 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
53887 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
53888 + return 1;
53889 + }
53890 +
53891 + return 1;
53892 +}
53893 +
53894 +void
53895 +gr_acl_handle_psacct(struct task_struct *task, const long code)
53896 +{
53897 + unsigned long runtime;
53898 + unsigned long cputime;
53899 + unsigned int wday, cday;
53900 + __u8 whr, chr;
53901 + __u8 wmin, cmin;
53902 + __u8 wsec, csec;
53903 + struct timespec timeval;
53904 +
53905 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
53906 + !(task->acl->mode & GR_PROCACCT)))
53907 + return;
53908 +
53909 + do_posix_clock_monotonic_gettime(&timeval);
53910 + runtime = timeval.tv_sec - task->start_time.tv_sec;
53911 + wday = runtime / (3600 * 24);
53912 + runtime -= wday * (3600 * 24);
53913 + whr = runtime / 3600;
53914 + runtime -= whr * 3600;
53915 + wmin = runtime / 60;
53916 + runtime -= wmin * 60;
53917 + wsec = runtime;
53918 +
53919 + cputime = (task->utime + task->stime) / HZ;
53920 + cday = cputime / (3600 * 24);
53921 + cputime -= cday * (3600 * 24);
53922 + chr = cputime / 3600;
53923 + cputime -= chr * 3600;
53924 + cmin = cputime / 60;
53925 + cputime -= cmin * 60;
53926 + csec = cputime;
53927 +
53928 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
53929 +
53930 + return;
53931 +}
53932 +
53933 +void gr_set_kernel_label(struct task_struct *task)
53934 +{
53935 + if (gr_status & GR_READY) {
53936 + task->role = kernel_role;
53937 + task->acl = kernel_role->root_label;
53938 + }
53939 + return;
53940 +}
53941 +
53942 +#ifdef CONFIG_TASKSTATS
53943 +int gr_is_taskstats_denied(int pid)
53944 +{
53945 + struct task_struct *task;
53946 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53947 + const struct cred *cred;
53948 +#endif
53949 + int ret = 0;
53950 +
53951 + /* restrict taskstats viewing to un-chrooted root users
53952 + who have the 'view' subject flag if the RBAC system is enabled
53953 + */
53954 +
53955 + rcu_read_lock();
53956 + read_lock(&tasklist_lock);
53957 + task = find_task_by_vpid(pid);
53958 + if (task) {
53959 +#ifdef CONFIG_GRKERNSEC_CHROOT
53960 + if (proc_is_chrooted(task))
53961 + ret = -EACCES;
53962 +#endif
53963 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53964 + cred = __task_cred(task);
53965 +#ifdef CONFIG_GRKERNSEC_PROC_USER
53966 + if (cred->uid != 0)
53967 + ret = -EACCES;
53968 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53969 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
53970 + ret = -EACCES;
53971 +#endif
53972 +#endif
53973 + if (gr_status & GR_READY) {
53974 + if (!(task->acl->mode & GR_VIEW))
53975 + ret = -EACCES;
53976 + }
53977 + } else
53978 + ret = -ENOENT;
53979 +
53980 + read_unlock(&tasklist_lock);
53981 + rcu_read_unlock();
53982 +
53983 + return ret;
53984 +}
53985 +#endif
53986 +
53987 +/* AUXV entries are filled via a descendant of search_binary_handler
53988 + after we've already applied the subject for the target
53989 +*/
53990 +int gr_acl_enable_at_secure(void)
53991 +{
53992 + if (unlikely(!(gr_status & GR_READY)))
53993 + return 0;
53994 +
53995 + if (current->acl->mode & GR_ATSECURE)
53996 + return 1;
53997 +
53998 + return 0;
53999 +}
54000 +
54001 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54002 +{
54003 + struct task_struct *task = current;
54004 + struct dentry *dentry = file->f_path.dentry;
54005 + struct vfsmount *mnt = file->f_path.mnt;
54006 + struct acl_object_label *obj, *tmp;
54007 + struct acl_subject_label *subj;
54008 + unsigned int bufsize;
54009 + int is_not_root;
54010 + char *path;
54011 + dev_t dev = __get_dev(dentry);
54012 +
54013 + if (unlikely(!(gr_status & GR_READY)))
54014 + return 1;
54015 +
54016 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54017 + return 1;
54018 +
54019 + /* ignore Eric Biederman */
54020 + if (IS_PRIVATE(dentry->d_inode))
54021 + return 1;
54022 +
54023 + subj = task->acl;
54024 + do {
54025 + obj = lookup_acl_obj_label(ino, dev, subj);
54026 + if (obj != NULL)
54027 + return (obj->mode & GR_FIND) ? 1 : 0;
54028 + } while ((subj = subj->parent_subject));
54029 +
54030 + /* this is purely an optimization since we're looking for an object
54031 + for the directory we're doing a readdir on
54032 + if it's possible for any globbed object to match the entry we're
54033 + filling into the directory, then the object we find here will be
54034 + an anchor point with attached globbed objects
54035 + */
54036 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54037 + if (obj->globbed == NULL)
54038 + return (obj->mode & GR_FIND) ? 1 : 0;
54039 +
54040 + is_not_root = ((obj->filename[0] == '/') &&
54041 + (obj->filename[1] == '\0')) ? 0 : 1;
54042 + bufsize = PAGE_SIZE - namelen - is_not_root;
54043 +
54044 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54045 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54046 + return 1;
54047 +
54048 + preempt_disable();
54049 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54050 + bufsize);
54051 +
54052 + bufsize = strlen(path);
54053 +
54054 + /* if base is "/", don't append an additional slash */
54055 + if (is_not_root)
54056 + *(path + bufsize) = '/';
54057 + memcpy(path + bufsize + is_not_root, name, namelen);
54058 + *(path + bufsize + namelen + is_not_root) = '\0';
54059 +
54060 + tmp = obj->globbed;
54061 + while (tmp) {
54062 + if (!glob_match(tmp->filename, path)) {
54063 + preempt_enable();
54064 + return (tmp->mode & GR_FIND) ? 1 : 0;
54065 + }
54066 + tmp = tmp->next;
54067 + }
54068 + preempt_enable();
54069 + return (obj->mode & GR_FIND) ? 1 : 0;
54070 +}
54071 +
54072 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54073 +EXPORT_SYMBOL(gr_acl_is_enabled);
54074 +#endif
54075 +EXPORT_SYMBOL(gr_learn_resource);
54076 +EXPORT_SYMBOL(gr_set_kernel_label);
54077 +#ifdef CONFIG_SECURITY
54078 +EXPORT_SYMBOL(gr_check_user_change);
54079 +EXPORT_SYMBOL(gr_check_group_change);
54080 +#endif
54081 +
54082 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54083 new file mode 100644
54084 index 0000000..34fefda
54085 --- /dev/null
54086 +++ b/grsecurity/gracl_alloc.c
54087 @@ -0,0 +1,105 @@
54088 +#include <linux/kernel.h>
54089 +#include <linux/mm.h>
54090 +#include <linux/slab.h>
54091 +#include <linux/vmalloc.h>
54092 +#include <linux/gracl.h>
54093 +#include <linux/grsecurity.h>
54094 +
54095 +static unsigned long alloc_stack_next = 1;
54096 +static unsigned long alloc_stack_size = 1;
54097 +static void **alloc_stack;
54098 +
54099 +static __inline__ int
54100 +alloc_pop(void)
54101 +{
54102 + if (alloc_stack_next == 1)
54103 + return 0;
54104 +
54105 + kfree(alloc_stack[alloc_stack_next - 2]);
54106 +
54107 + alloc_stack_next--;
54108 +
54109 + return 1;
54110 +}
54111 +
54112 +static __inline__ int
54113 +alloc_push(void *buf)
54114 +{
54115 + if (alloc_stack_next >= alloc_stack_size)
54116 + return 1;
54117 +
54118 + alloc_stack[alloc_stack_next - 1] = buf;
54119 +
54120 + alloc_stack_next++;
54121 +
54122 + return 0;
54123 +}
54124 +
54125 +void *
54126 +acl_alloc(unsigned long len)
54127 +{
54128 + void *ret = NULL;
54129 +
54130 + if (!len || len > PAGE_SIZE)
54131 + goto out;
54132 +
54133 + ret = kmalloc(len, GFP_KERNEL);
54134 +
54135 + if (ret) {
54136 + if (alloc_push(ret)) {
54137 + kfree(ret);
54138 + ret = NULL;
54139 + }
54140 + }
54141 +
54142 +out:
54143 + return ret;
54144 +}
54145 +
54146 +void *
54147 +acl_alloc_num(unsigned long num, unsigned long len)
54148 +{
54149 + if (!len || (num > (PAGE_SIZE / len)))
54150 + return NULL;
54151 +
54152 + return acl_alloc(num * len);
54153 +}
54154 +
54155 +void
54156 +acl_free_all(void)
54157 +{
54158 + if (gr_acl_is_enabled() || !alloc_stack)
54159 + return;
54160 +
54161 + while (alloc_pop()) ;
54162 +
54163 + if (alloc_stack) {
54164 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54165 + kfree(alloc_stack);
54166 + else
54167 + vfree(alloc_stack);
54168 + }
54169 +
54170 + alloc_stack = NULL;
54171 + alloc_stack_size = 1;
54172 + alloc_stack_next = 1;
54173 +
54174 + return;
54175 +}
54176 +
54177 +int
54178 +acl_alloc_stack_init(unsigned long size)
54179 +{
54180 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54181 + alloc_stack =
54182 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54183 + else
54184 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54185 +
54186 + alloc_stack_size = size;
54187 +
54188 + if (!alloc_stack)
54189 + return 0;
54190 + else
54191 + return 1;
54192 +}
54193 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54194 new file mode 100644
54195 index 0000000..955ddfb
54196 --- /dev/null
54197 +++ b/grsecurity/gracl_cap.c
54198 @@ -0,0 +1,101 @@
54199 +#include <linux/kernel.h>
54200 +#include <linux/module.h>
54201 +#include <linux/sched.h>
54202 +#include <linux/gracl.h>
54203 +#include <linux/grsecurity.h>
54204 +#include <linux/grinternal.h>
54205 +
54206 +extern const char *captab_log[];
54207 +extern int captab_log_entries;
54208 +
54209 +int
54210 +gr_acl_is_capable(const int cap)
54211 +{
54212 + struct task_struct *task = current;
54213 + const struct cred *cred = current_cred();
54214 + struct acl_subject_label *curracl;
54215 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54216 + kernel_cap_t cap_audit = __cap_empty_set;
54217 +
54218 + if (!gr_acl_is_enabled())
54219 + return 1;
54220 +
54221 + curracl = task->acl;
54222 +
54223 + cap_drop = curracl->cap_lower;
54224 + cap_mask = curracl->cap_mask;
54225 + cap_audit = curracl->cap_invert_audit;
54226 +
54227 + while ((curracl = curracl->parent_subject)) {
54228 + /* if the cap isn't specified in the current computed mask but is specified in the
54229 + current level subject, and is lowered in the current level subject, then add
54230 + it to the set of dropped capabilities
54231 + otherwise, add the current level subject's mask to the current computed mask
54232 + */
54233 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54234 + cap_raise(cap_mask, cap);
54235 + if (cap_raised(curracl->cap_lower, cap))
54236 + cap_raise(cap_drop, cap);
54237 + if (cap_raised(curracl->cap_invert_audit, cap))
54238 + cap_raise(cap_audit, cap);
54239 + }
54240 + }
54241 +
54242 + if (!cap_raised(cap_drop, cap)) {
54243 + if (cap_raised(cap_audit, cap))
54244 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54245 + return 1;
54246 + }
54247 +
54248 + curracl = task->acl;
54249 +
54250 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54251 + && cap_raised(cred->cap_effective, cap)) {
54252 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54253 + task->role->roletype, cred->uid,
54254 + cred->gid, task->exec_file ?
54255 + gr_to_filename(task->exec_file->f_path.dentry,
54256 + task->exec_file->f_path.mnt) : curracl->filename,
54257 + curracl->filename, 0UL,
54258 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54259 + return 1;
54260 + }
54261 +
54262 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54263 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54264 + return 0;
54265 +}
54266 +
54267 +int
54268 +gr_acl_is_capable_nolog(const int cap)
54269 +{
54270 + struct acl_subject_label *curracl;
54271 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54272 +
54273 + if (!gr_acl_is_enabled())
54274 + return 1;
54275 +
54276 + curracl = current->acl;
54277 +
54278 + cap_drop = curracl->cap_lower;
54279 + cap_mask = curracl->cap_mask;
54280 +
54281 + while ((curracl = curracl->parent_subject)) {
54282 + /* if the cap isn't specified in the current computed mask but is specified in the
54283 + current level subject, and is lowered in the current level subject, then add
54284 + it to the set of dropped capabilities
54285 + otherwise, add the current level subject's mask to the current computed mask
54286 + */
54287 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54288 + cap_raise(cap_mask, cap);
54289 + if (cap_raised(curracl->cap_lower, cap))
54290 + cap_raise(cap_drop, cap);
54291 + }
54292 + }
54293 +
54294 + if (!cap_raised(cap_drop, cap))
54295 + return 1;
54296 +
54297 + return 0;
54298 +}
54299 +
54300 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54301 new file mode 100644
54302 index 0000000..88d0e87
54303 --- /dev/null
54304 +++ b/grsecurity/gracl_fs.c
54305 @@ -0,0 +1,435 @@
54306 +#include <linux/kernel.h>
54307 +#include <linux/sched.h>
54308 +#include <linux/types.h>
54309 +#include <linux/fs.h>
54310 +#include <linux/file.h>
54311 +#include <linux/stat.h>
54312 +#include <linux/grsecurity.h>
54313 +#include <linux/grinternal.h>
54314 +#include <linux/gracl.h>
54315 +
54316 +umode_t
54317 +gr_acl_umask(void)
54318 +{
54319 + if (unlikely(!gr_acl_is_enabled()))
54320 + return 0;
54321 +
54322 + return current->role->umask;
54323 +}
54324 +
54325 +__u32
54326 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54327 + const struct vfsmount * mnt)
54328 +{
54329 + __u32 mode;
54330 +
54331 + if (unlikely(!dentry->d_inode))
54332 + return GR_FIND;
54333 +
54334 + mode =
54335 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54336 +
54337 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54338 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54339 + return mode;
54340 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54341 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54342 + return 0;
54343 + } else if (unlikely(!(mode & GR_FIND)))
54344 + return 0;
54345 +
54346 + return GR_FIND;
54347 +}
54348 +
54349 +__u32
54350 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54351 + int acc_mode)
54352 +{
54353 + __u32 reqmode = GR_FIND;
54354 + __u32 mode;
54355 +
54356 + if (unlikely(!dentry->d_inode))
54357 + return reqmode;
54358 +
54359 + if (acc_mode & MAY_APPEND)
54360 + reqmode |= GR_APPEND;
54361 + else if (acc_mode & MAY_WRITE)
54362 + reqmode |= GR_WRITE;
54363 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54364 + reqmode |= GR_READ;
54365 +
54366 + mode =
54367 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54368 + mnt);
54369 +
54370 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54371 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54372 + reqmode & GR_READ ? " reading" : "",
54373 + reqmode & GR_WRITE ? " writing" : reqmode &
54374 + GR_APPEND ? " appending" : "");
54375 + return reqmode;
54376 + } else
54377 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54378 + {
54379 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54380 + reqmode & GR_READ ? " reading" : "",
54381 + reqmode & GR_WRITE ? " writing" : reqmode &
54382 + GR_APPEND ? " appending" : "");
54383 + return 0;
54384 + } else if (unlikely((mode & reqmode) != reqmode))
54385 + return 0;
54386 +
54387 + return reqmode;
54388 +}
54389 +
54390 +__u32
54391 +gr_acl_handle_creat(const struct dentry * dentry,
54392 + const struct dentry * p_dentry,
54393 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54394 + const int imode)
54395 +{
54396 + __u32 reqmode = GR_WRITE | GR_CREATE;
54397 + __u32 mode;
54398 +
54399 + if (acc_mode & MAY_APPEND)
54400 + reqmode |= GR_APPEND;
54401 + // if a directory was required or the directory already exists, then
54402 + // don't count this open as a read
54403 + if ((acc_mode & MAY_READ) &&
54404 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54405 + reqmode |= GR_READ;
54406 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54407 + reqmode |= GR_SETID;
54408 +
54409 + mode =
54410 + gr_check_create(dentry, p_dentry, p_mnt,
54411 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54412 +
54413 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54414 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54415 + reqmode & GR_READ ? " reading" : "",
54416 + reqmode & GR_WRITE ? " writing" : reqmode &
54417 + GR_APPEND ? " appending" : "");
54418 + return reqmode;
54419 + } else
54420 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54421 + {
54422 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54423 + reqmode & GR_READ ? " reading" : "",
54424 + reqmode & GR_WRITE ? " writing" : reqmode &
54425 + GR_APPEND ? " appending" : "");
54426 + return 0;
54427 + } else if (unlikely((mode & reqmode) != reqmode))
54428 + return 0;
54429 +
54430 + return reqmode;
54431 +}
54432 +
54433 +__u32
54434 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54435 + const int fmode)
54436 +{
54437 + __u32 mode, reqmode = GR_FIND;
54438 +
54439 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54440 + reqmode |= GR_EXEC;
54441 + if (fmode & S_IWOTH)
54442 + reqmode |= GR_WRITE;
54443 + if (fmode & S_IROTH)
54444 + reqmode |= GR_READ;
54445 +
54446 + mode =
54447 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54448 + mnt);
54449 +
54450 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54451 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54452 + reqmode & GR_READ ? " reading" : "",
54453 + reqmode & GR_WRITE ? " writing" : "",
54454 + reqmode & GR_EXEC ? " executing" : "");
54455 + return reqmode;
54456 + } else
54457 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54458 + {
54459 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54460 + reqmode & GR_READ ? " reading" : "",
54461 + reqmode & GR_WRITE ? " writing" : "",
54462 + reqmode & GR_EXEC ? " executing" : "");
54463 + return 0;
54464 + } else if (unlikely((mode & reqmode) != reqmode))
54465 + return 0;
54466 +
54467 + return reqmode;
54468 +}
54469 +
54470 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54471 +{
54472 + __u32 mode;
54473 +
54474 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54475 +
54476 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54477 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54478 + return mode;
54479 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54480 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54481 + return 0;
54482 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54483 + return 0;
54484 +
54485 + return (reqmode);
54486 +}
54487 +
54488 +__u32
54489 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54490 +{
54491 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54492 +}
54493 +
54494 +__u32
54495 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54496 +{
54497 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54498 +}
54499 +
54500 +__u32
54501 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54502 +{
54503 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54504 +}
54505 +
54506 +__u32
54507 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54508 +{
54509 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54510 +}
54511 +
54512 +__u32
54513 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54514 + umode_t *modeptr)
54515 +{
54516 + umode_t mode;
54517 +
54518 + *modeptr &= ~gr_acl_umask();
54519 + mode = *modeptr;
54520 +
54521 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54522 + return 1;
54523 +
54524 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
54525 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54526 + GR_CHMOD_ACL_MSG);
54527 + } else {
54528 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54529 + }
54530 +}
54531 +
54532 +__u32
54533 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54534 +{
54535 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54536 +}
54537 +
54538 +__u32
54539 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54540 +{
54541 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54542 +}
54543 +
54544 +__u32
54545 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54546 +{
54547 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54548 +}
54549 +
54550 +__u32
54551 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54552 +{
54553 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54554 + GR_UNIXCONNECT_ACL_MSG);
54555 +}
54556 +
54557 +/* hardlinks require at minimum create and link permission,
54558 + any additional privilege required is based on the
54559 + privilege of the file being linked to
54560 +*/
54561 +__u32
54562 +gr_acl_handle_link(const struct dentry * new_dentry,
54563 + const struct dentry * parent_dentry,
54564 + const struct vfsmount * parent_mnt,
54565 + const struct dentry * old_dentry,
54566 + const struct vfsmount * old_mnt, const char *to)
54567 +{
54568 + __u32 mode;
54569 + __u32 needmode = GR_CREATE | GR_LINK;
54570 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54571 +
54572 + mode =
54573 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54574 + old_mnt);
54575 +
54576 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54577 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54578 + return mode;
54579 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54580 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54581 + return 0;
54582 + } else if (unlikely((mode & needmode) != needmode))
54583 + return 0;
54584 +
54585 + return 1;
54586 +}
54587 +
54588 +__u32
54589 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54590 + const struct dentry * parent_dentry,
54591 + const struct vfsmount * parent_mnt, const char *from)
54592 +{
54593 + __u32 needmode = GR_WRITE | GR_CREATE;
54594 + __u32 mode;
54595 +
54596 + mode =
54597 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
54598 + GR_CREATE | GR_AUDIT_CREATE |
54599 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54600 +
54601 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54602 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54603 + return mode;
54604 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54605 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54606 + return 0;
54607 + } else if (unlikely((mode & needmode) != needmode))
54608 + return 0;
54609 +
54610 + return (GR_WRITE | GR_CREATE);
54611 +}
54612 +
54613 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54614 +{
54615 + __u32 mode;
54616 +
54617 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54618 +
54619 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54620 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54621 + return mode;
54622 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54623 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54624 + return 0;
54625 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54626 + return 0;
54627 +
54628 + return (reqmode);
54629 +}
54630 +
54631 +__u32
54632 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54633 + const struct dentry * parent_dentry,
54634 + const struct vfsmount * parent_mnt,
54635 + const int mode)
54636 +{
54637 + __u32 reqmode = GR_WRITE | GR_CREATE;
54638 + if (unlikely(mode & (S_ISUID | S_ISGID)))
54639 + reqmode |= GR_SETID;
54640 +
54641 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54642 + reqmode, GR_MKNOD_ACL_MSG);
54643 +}
54644 +
54645 +__u32
54646 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
54647 + const struct dentry *parent_dentry,
54648 + const struct vfsmount *parent_mnt)
54649 +{
54650 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54651 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
54652 +}
54653 +
54654 +#define RENAME_CHECK_SUCCESS(old, new) \
54655 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
54656 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
54657 +
54658 +int
54659 +gr_acl_handle_rename(struct dentry *new_dentry,
54660 + struct dentry *parent_dentry,
54661 + const struct vfsmount *parent_mnt,
54662 + struct dentry *old_dentry,
54663 + struct inode *old_parent_inode,
54664 + struct vfsmount *old_mnt, const char *newname)
54665 +{
54666 + __u32 comp1, comp2;
54667 + int error = 0;
54668 +
54669 + if (unlikely(!gr_acl_is_enabled()))
54670 + return 0;
54671 +
54672 + if (!new_dentry->d_inode) {
54673 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
54674 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
54675 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
54676 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
54677 + GR_DELETE | GR_AUDIT_DELETE |
54678 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54679 + GR_SUPPRESS, old_mnt);
54680 + } else {
54681 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
54682 + GR_CREATE | GR_DELETE |
54683 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
54684 + GR_AUDIT_READ | GR_AUDIT_WRITE |
54685 + GR_SUPPRESS, parent_mnt);
54686 + comp2 =
54687 + gr_search_file(old_dentry,
54688 + GR_READ | GR_WRITE | GR_AUDIT_READ |
54689 + GR_DELETE | GR_AUDIT_DELETE |
54690 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
54691 + }
54692 +
54693 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
54694 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
54695 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54696 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
54697 + && !(comp2 & GR_SUPPRESS)) {
54698 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
54699 + error = -EACCES;
54700 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
54701 + error = -EACCES;
54702 +
54703 + return error;
54704 +}
54705 +
54706 +void
54707 +gr_acl_handle_exit(void)
54708 +{
54709 + u16 id;
54710 + char *rolename;
54711 + struct file *exec_file;
54712 +
54713 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
54714 + !(current->role->roletype & GR_ROLE_PERSIST))) {
54715 + id = current->acl_role_id;
54716 + rolename = current->role->rolename;
54717 + gr_set_acls(1);
54718 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
54719 + }
54720 +
54721 + write_lock(&grsec_exec_file_lock);
54722 + exec_file = current->exec_file;
54723 + current->exec_file = NULL;
54724 + write_unlock(&grsec_exec_file_lock);
54725 +
54726 + if (exec_file)
54727 + fput(exec_file);
54728 +}
54729 +
54730 +int
54731 +gr_acl_handle_procpidmem(const struct task_struct *task)
54732 +{
54733 + if (unlikely(!gr_acl_is_enabled()))
54734 + return 0;
54735 +
54736 + if (task != current && task->acl->mode & GR_PROTPROCFD)
54737 + return -EACCES;
54738 +
54739 + return 0;
54740 +}
54741 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
54742 new file mode 100644
54743 index 0000000..17050ca
54744 --- /dev/null
54745 +++ b/grsecurity/gracl_ip.c
54746 @@ -0,0 +1,381 @@
54747 +#include <linux/kernel.h>
54748 +#include <asm/uaccess.h>
54749 +#include <asm/errno.h>
54750 +#include <net/sock.h>
54751 +#include <linux/file.h>
54752 +#include <linux/fs.h>
54753 +#include <linux/net.h>
54754 +#include <linux/in.h>
54755 +#include <linux/skbuff.h>
54756 +#include <linux/ip.h>
54757 +#include <linux/udp.h>
54758 +#include <linux/types.h>
54759 +#include <linux/sched.h>
54760 +#include <linux/netdevice.h>
54761 +#include <linux/inetdevice.h>
54762 +#include <linux/gracl.h>
54763 +#include <linux/grsecurity.h>
54764 +#include <linux/grinternal.h>
54765 +
54766 +#define GR_BIND 0x01
54767 +#define GR_CONNECT 0x02
54768 +#define GR_INVERT 0x04
54769 +#define GR_BINDOVERRIDE 0x08
54770 +#define GR_CONNECTOVERRIDE 0x10
54771 +#define GR_SOCK_FAMILY 0x20
54772 +
54773 +static const char * gr_protocols[IPPROTO_MAX] = {
54774 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
54775 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
54776 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
54777 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
54778 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
54779 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
54780 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
54781 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
54782 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
54783 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
54784 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
54785 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
54786 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
54787 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
54788 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
54789 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
54790 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
54791 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
54792 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
54793 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
54794 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
54795 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
54796 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
54797 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
54798 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
54799 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
54800 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
54801 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
54802 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
54803 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
54804 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
54805 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
54806 + };
54807 +
54808 +static const char * gr_socktypes[SOCK_MAX] = {
54809 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
54810 + "unknown:7", "unknown:8", "unknown:9", "packet"
54811 + };
54812 +
54813 +static const char * gr_sockfamilies[AF_MAX+1] = {
54814 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
54815 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
54816 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
54817 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
54818 + };
54819 +
54820 +const char *
54821 +gr_proto_to_name(unsigned char proto)
54822 +{
54823 + return gr_protocols[proto];
54824 +}
54825 +
54826 +const char *
54827 +gr_socktype_to_name(unsigned char type)
54828 +{
54829 + return gr_socktypes[type];
54830 +}
54831 +
54832 +const char *
54833 +gr_sockfamily_to_name(unsigned char family)
54834 +{
54835 + return gr_sockfamilies[family];
54836 +}
54837 +
54838 +int
54839 +gr_search_socket(const int domain, const int type, const int protocol)
54840 +{
54841 + struct acl_subject_label *curr;
54842 + const struct cred *cred = current_cred();
54843 +
54844 + if (unlikely(!gr_acl_is_enabled()))
54845 + goto exit;
54846 +
54847 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
54848 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
54849 + goto exit; // let the kernel handle it
54850 +
54851 + curr = current->acl;
54852 +
54853 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
54854 + /* the family is allowed, if this is PF_INET allow it only if
54855 + the extra sock type/protocol checks pass */
54856 + if (domain == PF_INET)
54857 + goto inet_check;
54858 + goto exit;
54859 + } else {
54860 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54861 + __u32 fakeip = 0;
54862 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54863 + current->role->roletype, cred->uid,
54864 + cred->gid, current->exec_file ?
54865 + gr_to_filename(current->exec_file->f_path.dentry,
54866 + current->exec_file->f_path.mnt) :
54867 + curr->filename, curr->filename,
54868 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
54869 + &current->signal->saved_ip);
54870 + goto exit;
54871 + }
54872 + goto exit_fail;
54873 + }
54874 +
54875 +inet_check:
54876 + /* the rest of this checking is for IPv4 only */
54877 + if (!curr->ips)
54878 + goto exit;
54879 +
54880 + if ((curr->ip_type & (1 << type)) &&
54881 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
54882 + goto exit;
54883 +
54884 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54885 + /* we don't place acls on raw sockets , and sometimes
54886 + dgram/ip sockets are opened for ioctl and not
54887 + bind/connect, so we'll fake a bind learn log */
54888 + if (type == SOCK_RAW || type == SOCK_PACKET) {
54889 + __u32 fakeip = 0;
54890 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54891 + current->role->roletype, cred->uid,
54892 + cred->gid, current->exec_file ?
54893 + gr_to_filename(current->exec_file->f_path.dentry,
54894 + current->exec_file->f_path.mnt) :
54895 + curr->filename, curr->filename,
54896 + &fakeip, 0, type,
54897 + protocol, GR_CONNECT, &current->signal->saved_ip);
54898 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
54899 + __u32 fakeip = 0;
54900 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54901 + current->role->roletype, cred->uid,
54902 + cred->gid, current->exec_file ?
54903 + gr_to_filename(current->exec_file->f_path.dentry,
54904 + current->exec_file->f_path.mnt) :
54905 + curr->filename, curr->filename,
54906 + &fakeip, 0, type,
54907 + protocol, GR_BIND, &current->signal->saved_ip);
54908 + }
54909 + /* we'll log when they use connect or bind */
54910 + goto exit;
54911 + }
54912 +
54913 +exit_fail:
54914 + if (domain == PF_INET)
54915 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
54916 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
54917 + else
54918 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
54919 + gr_socktype_to_name(type), protocol);
54920 +
54921 + return 0;
54922 +exit:
54923 + return 1;
54924 +}
54925 +
54926 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
54927 +{
54928 + if ((ip->mode & mode) &&
54929 + (ip_port >= ip->low) &&
54930 + (ip_port <= ip->high) &&
54931 + ((ntohl(ip_addr) & our_netmask) ==
54932 + (ntohl(our_addr) & our_netmask))
54933 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
54934 + && (ip->type & (1 << type))) {
54935 + if (ip->mode & GR_INVERT)
54936 + return 2; // specifically denied
54937 + else
54938 + return 1; // allowed
54939 + }
54940 +
54941 + return 0; // not specifically allowed, may continue parsing
54942 +}
54943 +
54944 +static int
54945 +gr_search_connectbind(const int full_mode, struct sock *sk,
54946 + struct sockaddr_in *addr, const int type)
54947 +{
54948 + char iface[IFNAMSIZ] = {0};
54949 + struct acl_subject_label *curr;
54950 + struct acl_ip_label *ip;
54951 + struct inet_sock *isk;
54952 + struct net_device *dev;
54953 + struct in_device *idev;
54954 + unsigned long i;
54955 + int ret;
54956 + int mode = full_mode & (GR_BIND | GR_CONNECT);
54957 + __u32 ip_addr = 0;
54958 + __u32 our_addr;
54959 + __u32 our_netmask;
54960 + char *p;
54961 + __u16 ip_port = 0;
54962 + const struct cred *cred = current_cred();
54963 +
54964 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
54965 + return 0;
54966 +
54967 + curr = current->acl;
54968 + isk = inet_sk(sk);
54969 +
54970 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
54971 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
54972 + addr->sin_addr.s_addr = curr->inaddr_any_override;
54973 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
54974 + struct sockaddr_in saddr;
54975 + int err;
54976 +
54977 + saddr.sin_family = AF_INET;
54978 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
54979 + saddr.sin_port = isk->inet_sport;
54980 +
54981 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54982 + if (err)
54983 + return err;
54984 +
54985 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
54986 + if (err)
54987 + return err;
54988 + }
54989 +
54990 + if (!curr->ips)
54991 + return 0;
54992 +
54993 + ip_addr = addr->sin_addr.s_addr;
54994 + ip_port = ntohs(addr->sin_port);
54995 +
54996 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
54997 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
54998 + current->role->roletype, cred->uid,
54999 + cred->gid, current->exec_file ?
55000 + gr_to_filename(current->exec_file->f_path.dentry,
55001 + current->exec_file->f_path.mnt) :
55002 + curr->filename, curr->filename,
55003 + &ip_addr, ip_port, type,
55004 + sk->sk_protocol, mode, &current->signal->saved_ip);
55005 + return 0;
55006 + }
55007 +
55008 + for (i = 0; i < curr->ip_num; i++) {
55009 + ip = *(curr->ips + i);
55010 + if (ip->iface != NULL) {
55011 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55012 + p = strchr(iface, ':');
55013 + if (p != NULL)
55014 + *p = '\0';
55015 + dev = dev_get_by_name(sock_net(sk), iface);
55016 + if (dev == NULL)
55017 + continue;
55018 + idev = in_dev_get(dev);
55019 + if (idev == NULL) {
55020 + dev_put(dev);
55021 + continue;
55022 + }
55023 + rcu_read_lock();
55024 + for_ifa(idev) {
55025 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55026 + our_addr = ifa->ifa_address;
55027 + our_netmask = 0xffffffff;
55028 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55029 + if (ret == 1) {
55030 + rcu_read_unlock();
55031 + in_dev_put(idev);
55032 + dev_put(dev);
55033 + return 0;
55034 + } else if (ret == 2) {
55035 + rcu_read_unlock();
55036 + in_dev_put(idev);
55037 + dev_put(dev);
55038 + goto denied;
55039 + }
55040 + }
55041 + } endfor_ifa(idev);
55042 + rcu_read_unlock();
55043 + in_dev_put(idev);
55044 + dev_put(dev);
55045 + } else {
55046 + our_addr = ip->addr;
55047 + our_netmask = ip->netmask;
55048 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55049 + if (ret == 1)
55050 + return 0;
55051 + else if (ret == 2)
55052 + goto denied;
55053 + }
55054 + }
55055 +
55056 +denied:
55057 + if (mode == GR_BIND)
55058 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55059 + else if (mode == GR_CONNECT)
55060 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55061 +
55062 + return -EACCES;
55063 +}
55064 +
55065 +int
55066 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55067 +{
55068 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55069 +}
55070 +
55071 +int
55072 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55073 +{
55074 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55075 +}
55076 +
55077 +int gr_search_listen(struct socket *sock)
55078 +{
55079 + struct sock *sk = sock->sk;
55080 + struct sockaddr_in addr;
55081 +
55082 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55083 + addr.sin_port = inet_sk(sk)->inet_sport;
55084 +
55085 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55086 +}
55087 +
55088 +int gr_search_accept(struct socket *sock)
55089 +{
55090 + struct sock *sk = sock->sk;
55091 + struct sockaddr_in addr;
55092 +
55093 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55094 + addr.sin_port = inet_sk(sk)->inet_sport;
55095 +
55096 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55097 +}
55098 +
55099 +int
55100 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55101 +{
55102 + if (addr)
55103 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55104 + else {
55105 + struct sockaddr_in sin;
55106 + const struct inet_sock *inet = inet_sk(sk);
55107 +
55108 + sin.sin_addr.s_addr = inet->inet_daddr;
55109 + sin.sin_port = inet->inet_dport;
55110 +
55111 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55112 + }
55113 +}
55114 +
55115 +int
55116 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55117 +{
55118 + struct sockaddr_in sin;
55119 +
55120 + if (unlikely(skb->len < sizeof (struct udphdr)))
55121 + return 0; // skip this packet
55122 +
55123 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55124 + sin.sin_port = udp_hdr(skb)->source;
55125 +
55126 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55127 +}
55128 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55129 new file mode 100644
55130 index 0000000..25f54ef
55131 --- /dev/null
55132 +++ b/grsecurity/gracl_learn.c
55133 @@ -0,0 +1,207 @@
55134 +#include <linux/kernel.h>
55135 +#include <linux/mm.h>
55136 +#include <linux/sched.h>
55137 +#include <linux/poll.h>
55138 +#include <linux/string.h>
55139 +#include <linux/file.h>
55140 +#include <linux/types.h>
55141 +#include <linux/vmalloc.h>
55142 +#include <linux/grinternal.h>
55143 +
55144 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55145 + size_t count, loff_t *ppos);
55146 +extern int gr_acl_is_enabled(void);
55147 +
55148 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55149 +static int gr_learn_attached;
55150 +
55151 +/* use a 512k buffer */
55152 +#define LEARN_BUFFER_SIZE (512 * 1024)
55153 +
55154 +static DEFINE_SPINLOCK(gr_learn_lock);
55155 +static DEFINE_MUTEX(gr_learn_user_mutex);
55156 +
55157 +/* we need to maintain two buffers, so that the kernel context of grlearn
55158 + uses a semaphore around the userspace copying, and the other kernel contexts
55159 + use a spinlock when copying into the buffer, since they cannot sleep
55160 +*/
55161 +static char *learn_buffer;
55162 +static char *learn_buffer_user;
55163 +static int learn_buffer_len;
55164 +static int learn_buffer_user_len;
55165 +
55166 +static ssize_t
55167 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55168 +{
55169 + DECLARE_WAITQUEUE(wait, current);
55170 + ssize_t retval = 0;
55171 +
55172 + add_wait_queue(&learn_wait, &wait);
55173 + set_current_state(TASK_INTERRUPTIBLE);
55174 + do {
55175 + mutex_lock(&gr_learn_user_mutex);
55176 + spin_lock(&gr_learn_lock);
55177 + if (learn_buffer_len)
55178 + break;
55179 + spin_unlock(&gr_learn_lock);
55180 + mutex_unlock(&gr_learn_user_mutex);
55181 + if (file->f_flags & O_NONBLOCK) {
55182 + retval = -EAGAIN;
55183 + goto out;
55184 + }
55185 + if (signal_pending(current)) {
55186 + retval = -ERESTARTSYS;
55187 + goto out;
55188 + }
55189 +
55190 + schedule();
55191 + } while (1);
55192 +
55193 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55194 + learn_buffer_user_len = learn_buffer_len;
55195 + retval = learn_buffer_len;
55196 + learn_buffer_len = 0;
55197 +
55198 + spin_unlock(&gr_learn_lock);
55199 +
55200 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55201 + retval = -EFAULT;
55202 +
55203 + mutex_unlock(&gr_learn_user_mutex);
55204 +out:
55205 + set_current_state(TASK_RUNNING);
55206 + remove_wait_queue(&learn_wait, &wait);
55207 + return retval;
55208 +}
55209 +
55210 +static unsigned int
55211 +poll_learn(struct file * file, poll_table * wait)
55212 +{
55213 + poll_wait(file, &learn_wait, wait);
55214 +
55215 + if (learn_buffer_len)
55216 + return (POLLIN | POLLRDNORM);
55217 +
55218 + return 0;
55219 +}
55220 +
55221 +void
55222 +gr_clear_learn_entries(void)
55223 +{
55224 + char *tmp;
55225 +
55226 + mutex_lock(&gr_learn_user_mutex);
55227 + spin_lock(&gr_learn_lock);
55228 + tmp = learn_buffer;
55229 + learn_buffer = NULL;
55230 + spin_unlock(&gr_learn_lock);
55231 + if (tmp)
55232 + vfree(tmp);
55233 + if (learn_buffer_user != NULL) {
55234 + vfree(learn_buffer_user);
55235 + learn_buffer_user = NULL;
55236 + }
55237 + learn_buffer_len = 0;
55238 + mutex_unlock(&gr_learn_user_mutex);
55239 +
55240 + return;
55241 +}
55242 +
55243 +void
55244 +gr_add_learn_entry(const char *fmt, ...)
55245 +{
55246 + va_list args;
55247 + unsigned int len;
55248 +
55249 + if (!gr_learn_attached)
55250 + return;
55251 +
55252 + spin_lock(&gr_learn_lock);
55253 +
55254 + /* leave a gap at the end so we know when it's "full" but don't have to
55255 + compute the exact length of the string we're trying to append
55256 + */
55257 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55258 + spin_unlock(&gr_learn_lock);
55259 + wake_up_interruptible(&learn_wait);
55260 + return;
55261 + }
55262 + if (learn_buffer == NULL) {
55263 + spin_unlock(&gr_learn_lock);
55264 + return;
55265 + }
55266 +
55267 + va_start(args, fmt);
55268 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55269 + va_end(args);
55270 +
55271 + learn_buffer_len += len + 1;
55272 +
55273 + spin_unlock(&gr_learn_lock);
55274 + wake_up_interruptible(&learn_wait);
55275 +
55276 + return;
55277 +}
55278 +
55279 +static int
55280 +open_learn(struct inode *inode, struct file *file)
55281 +{
55282 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55283 + return -EBUSY;
55284 + if (file->f_mode & FMODE_READ) {
55285 + int retval = 0;
55286 + mutex_lock(&gr_learn_user_mutex);
55287 + if (learn_buffer == NULL)
55288 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55289 + if (learn_buffer_user == NULL)
55290 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55291 + if (learn_buffer == NULL) {
55292 + retval = -ENOMEM;
55293 + goto out_error;
55294 + }
55295 + if (learn_buffer_user == NULL) {
55296 + retval = -ENOMEM;
55297 + goto out_error;
55298 + }
55299 + learn_buffer_len = 0;
55300 + learn_buffer_user_len = 0;
55301 + gr_learn_attached = 1;
55302 +out_error:
55303 + mutex_unlock(&gr_learn_user_mutex);
55304 + return retval;
55305 + }
55306 + return 0;
55307 +}
55308 +
55309 +static int
55310 +close_learn(struct inode *inode, struct file *file)
55311 +{
55312 + if (file->f_mode & FMODE_READ) {
55313 + char *tmp = NULL;
55314 + mutex_lock(&gr_learn_user_mutex);
55315 + spin_lock(&gr_learn_lock);
55316 + tmp = learn_buffer;
55317 + learn_buffer = NULL;
55318 + spin_unlock(&gr_learn_lock);
55319 + if (tmp)
55320 + vfree(tmp);
55321 + if (learn_buffer_user != NULL) {
55322 + vfree(learn_buffer_user);
55323 + learn_buffer_user = NULL;
55324 + }
55325 + learn_buffer_len = 0;
55326 + learn_buffer_user_len = 0;
55327 + gr_learn_attached = 0;
55328 + mutex_unlock(&gr_learn_user_mutex);
55329 + }
55330 +
55331 + return 0;
55332 +}
55333 +
55334 +const struct file_operations grsec_fops = {
55335 + .read = read_learn,
55336 + .write = write_grsec_handler,
55337 + .open = open_learn,
55338 + .release = close_learn,
55339 + .poll = poll_learn,
55340 +};
55341 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55342 new file mode 100644
55343 index 0000000..39645c9
55344 --- /dev/null
55345 +++ b/grsecurity/gracl_res.c
55346 @@ -0,0 +1,68 @@
55347 +#include <linux/kernel.h>
55348 +#include <linux/sched.h>
55349 +#include <linux/gracl.h>
55350 +#include <linux/grinternal.h>
55351 +
55352 +static const char *restab_log[] = {
55353 + [RLIMIT_CPU] = "RLIMIT_CPU",
55354 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55355 + [RLIMIT_DATA] = "RLIMIT_DATA",
55356 + [RLIMIT_STACK] = "RLIMIT_STACK",
55357 + [RLIMIT_CORE] = "RLIMIT_CORE",
55358 + [RLIMIT_RSS] = "RLIMIT_RSS",
55359 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55360 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55361 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55362 + [RLIMIT_AS] = "RLIMIT_AS",
55363 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55364 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55365 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55366 + [RLIMIT_NICE] = "RLIMIT_NICE",
55367 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55368 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55369 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55370 +};
55371 +
55372 +void
55373 +gr_log_resource(const struct task_struct *task,
55374 + const int res, const unsigned long wanted, const int gt)
55375 +{
55376 + const struct cred *cred;
55377 + unsigned long rlim;
55378 +
55379 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55380 + return;
55381 +
55382 + // not yet supported resource
55383 + if (unlikely(!restab_log[res]))
55384 + return;
55385 +
55386 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55387 + rlim = task_rlimit_max(task, res);
55388 + else
55389 + rlim = task_rlimit(task, res);
55390 +
55391 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55392 + return;
55393 +
55394 + rcu_read_lock();
55395 + cred = __task_cred(task);
55396 +
55397 + if (res == RLIMIT_NPROC &&
55398 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55399 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55400 + goto out_rcu_unlock;
55401 + else if (res == RLIMIT_MEMLOCK &&
55402 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55403 + goto out_rcu_unlock;
55404 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55405 + goto out_rcu_unlock;
55406 + rcu_read_unlock();
55407 +
55408 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55409 +
55410 + return;
55411 +out_rcu_unlock:
55412 + rcu_read_unlock();
55413 + return;
55414 +}
55415 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55416 new file mode 100644
55417 index 0000000..5556be3
55418 --- /dev/null
55419 +++ b/grsecurity/gracl_segv.c
55420 @@ -0,0 +1,299 @@
55421 +#include <linux/kernel.h>
55422 +#include <linux/mm.h>
55423 +#include <asm/uaccess.h>
55424 +#include <asm/errno.h>
55425 +#include <asm/mman.h>
55426 +#include <net/sock.h>
55427 +#include <linux/file.h>
55428 +#include <linux/fs.h>
55429 +#include <linux/net.h>
55430 +#include <linux/in.h>
55431 +#include <linux/slab.h>
55432 +#include <linux/types.h>
55433 +#include <linux/sched.h>
55434 +#include <linux/timer.h>
55435 +#include <linux/gracl.h>
55436 +#include <linux/grsecurity.h>
55437 +#include <linux/grinternal.h>
55438 +
55439 +static struct crash_uid *uid_set;
55440 +static unsigned short uid_used;
55441 +static DEFINE_SPINLOCK(gr_uid_lock);
55442 +extern rwlock_t gr_inode_lock;
55443 +extern struct acl_subject_label *
55444 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55445 + struct acl_role_label *role);
55446 +
55447 +#ifdef CONFIG_BTRFS_FS
55448 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55449 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55450 +#endif
55451 +
55452 +static inline dev_t __get_dev(const struct dentry *dentry)
55453 +{
55454 +#ifdef CONFIG_BTRFS_FS
55455 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55456 + return get_btrfs_dev_from_inode(dentry->d_inode);
55457 + else
55458 +#endif
55459 + return dentry->d_inode->i_sb->s_dev;
55460 +}
55461 +
55462 +int
55463 +gr_init_uidset(void)
55464 +{
55465 + uid_set =
55466 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55467 + uid_used = 0;
55468 +
55469 + return uid_set ? 1 : 0;
55470 +}
55471 +
55472 +void
55473 +gr_free_uidset(void)
55474 +{
55475 + if (uid_set)
55476 + kfree(uid_set);
55477 +
55478 + return;
55479 +}
55480 +
55481 +int
55482 +gr_find_uid(const uid_t uid)
55483 +{
55484 + struct crash_uid *tmp = uid_set;
55485 + uid_t buid;
55486 + int low = 0, high = uid_used - 1, mid;
55487 +
55488 + while (high >= low) {
55489 + mid = (low + high) >> 1;
55490 + buid = tmp[mid].uid;
55491 + if (buid == uid)
55492 + return mid;
55493 + if (buid > uid)
55494 + high = mid - 1;
55495 + if (buid < uid)
55496 + low = mid + 1;
55497 + }
55498 +
55499 + return -1;
55500 +}
55501 +
55502 +static __inline__ void
55503 +gr_insertsort(void)
55504 +{
55505 + unsigned short i, j;
55506 + struct crash_uid index;
55507 +
55508 + for (i = 1; i < uid_used; i++) {
55509 + index = uid_set[i];
55510 + j = i;
55511 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55512 + uid_set[j] = uid_set[j - 1];
55513 + j--;
55514 + }
55515 + uid_set[j] = index;
55516 + }
55517 +
55518 + return;
55519 +}
55520 +
55521 +static __inline__ void
55522 +gr_insert_uid(const uid_t uid, const unsigned long expires)
55523 +{
55524 + int loc;
55525 +
55526 + if (uid_used == GR_UIDTABLE_MAX)
55527 + return;
55528 +
55529 + loc = gr_find_uid(uid);
55530 +
55531 + if (loc >= 0) {
55532 + uid_set[loc].expires = expires;
55533 + return;
55534 + }
55535 +
55536 + uid_set[uid_used].uid = uid;
55537 + uid_set[uid_used].expires = expires;
55538 + uid_used++;
55539 +
55540 + gr_insertsort();
55541 +
55542 + return;
55543 +}
55544 +
55545 +void
55546 +gr_remove_uid(const unsigned short loc)
55547 +{
55548 + unsigned short i;
55549 +
55550 + for (i = loc + 1; i < uid_used; i++)
55551 + uid_set[i - 1] = uid_set[i];
55552 +
55553 + uid_used--;
55554 +
55555 + return;
55556 +}
55557 +
55558 +int
55559 +gr_check_crash_uid(const uid_t uid)
55560 +{
55561 + int loc;
55562 + int ret = 0;
55563 +
55564 + if (unlikely(!gr_acl_is_enabled()))
55565 + return 0;
55566 +
55567 + spin_lock(&gr_uid_lock);
55568 + loc = gr_find_uid(uid);
55569 +
55570 + if (loc < 0)
55571 + goto out_unlock;
55572 +
55573 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
55574 + gr_remove_uid(loc);
55575 + else
55576 + ret = 1;
55577 +
55578 +out_unlock:
55579 + spin_unlock(&gr_uid_lock);
55580 + return ret;
55581 +}
55582 +
55583 +static __inline__ int
55584 +proc_is_setxid(const struct cred *cred)
55585 +{
55586 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
55587 + cred->uid != cred->fsuid)
55588 + return 1;
55589 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55590 + cred->gid != cred->fsgid)
55591 + return 1;
55592 +
55593 + return 0;
55594 +}
55595 +
55596 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
55597 +
55598 +void
55599 +gr_handle_crash(struct task_struct *task, const int sig)
55600 +{
55601 + struct acl_subject_label *curr;
55602 + struct task_struct *tsk, *tsk2;
55603 + const struct cred *cred;
55604 + const struct cred *cred2;
55605 +
55606 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55607 + return;
55608 +
55609 + if (unlikely(!gr_acl_is_enabled()))
55610 + return;
55611 +
55612 + curr = task->acl;
55613 +
55614 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
55615 + return;
55616 +
55617 + if (time_before_eq(curr->expires, get_seconds())) {
55618 + curr->expires = 0;
55619 + curr->crashes = 0;
55620 + }
55621 +
55622 + curr->crashes++;
55623 +
55624 + if (!curr->expires)
55625 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55626 +
55627 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55628 + time_after(curr->expires, get_seconds())) {
55629 + rcu_read_lock();
55630 + cred = __task_cred(task);
55631 + if (cred->uid && proc_is_setxid(cred)) {
55632 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55633 + spin_lock(&gr_uid_lock);
55634 + gr_insert_uid(cred->uid, curr->expires);
55635 + spin_unlock(&gr_uid_lock);
55636 + curr->expires = 0;
55637 + curr->crashes = 0;
55638 + read_lock(&tasklist_lock);
55639 + do_each_thread(tsk2, tsk) {
55640 + cred2 = __task_cred(tsk);
55641 + if (tsk != task && cred2->uid == cred->uid)
55642 + gr_fake_force_sig(SIGKILL, tsk);
55643 + } while_each_thread(tsk2, tsk);
55644 + read_unlock(&tasklist_lock);
55645 + } else {
55646 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55647 + read_lock(&tasklist_lock);
55648 + read_lock(&grsec_exec_file_lock);
55649 + do_each_thread(tsk2, tsk) {
55650 + if (likely(tsk != task)) {
55651 + // if this thread has the same subject as the one that triggered
55652 + // RES_CRASH and it's the same binary, kill it
55653 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
55654 + gr_fake_force_sig(SIGKILL, tsk);
55655 + }
55656 + } while_each_thread(tsk2, tsk);
55657 + read_unlock(&grsec_exec_file_lock);
55658 + read_unlock(&tasklist_lock);
55659 + }
55660 + rcu_read_unlock();
55661 + }
55662 +
55663 + return;
55664 +}
55665 +
55666 +int
55667 +gr_check_crash_exec(const struct file *filp)
55668 +{
55669 + struct acl_subject_label *curr;
55670 +
55671 + if (unlikely(!gr_acl_is_enabled()))
55672 + return 0;
55673 +
55674 + read_lock(&gr_inode_lock);
55675 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
55676 + __get_dev(filp->f_path.dentry),
55677 + current->role);
55678 + read_unlock(&gr_inode_lock);
55679 +
55680 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
55681 + (!curr->crashes && !curr->expires))
55682 + return 0;
55683 +
55684 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55685 + time_after(curr->expires, get_seconds()))
55686 + return 1;
55687 + else if (time_before_eq(curr->expires, get_seconds())) {
55688 + curr->crashes = 0;
55689 + curr->expires = 0;
55690 + }
55691 +
55692 + return 0;
55693 +}
55694 +
55695 +void
55696 +gr_handle_alertkill(struct task_struct *task)
55697 +{
55698 + struct acl_subject_label *curracl;
55699 + __u32 curr_ip;
55700 + struct task_struct *p, *p2;
55701 +
55702 + if (unlikely(!gr_acl_is_enabled()))
55703 + return;
55704 +
55705 + curracl = task->acl;
55706 + curr_ip = task->signal->curr_ip;
55707 +
55708 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
55709 + read_lock(&tasklist_lock);
55710 + do_each_thread(p2, p) {
55711 + if (p->signal->curr_ip == curr_ip)
55712 + gr_fake_force_sig(SIGKILL, p);
55713 + } while_each_thread(p2, p);
55714 + read_unlock(&tasklist_lock);
55715 + } else if (curracl->mode & GR_KILLPROC)
55716 + gr_fake_force_sig(SIGKILL, task);
55717 +
55718 + return;
55719 +}
55720 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
55721 new file mode 100644
55722 index 0000000..9d83a69
55723 --- /dev/null
55724 +++ b/grsecurity/gracl_shm.c
55725 @@ -0,0 +1,40 @@
55726 +#include <linux/kernel.h>
55727 +#include <linux/mm.h>
55728 +#include <linux/sched.h>
55729 +#include <linux/file.h>
55730 +#include <linux/ipc.h>
55731 +#include <linux/gracl.h>
55732 +#include <linux/grsecurity.h>
55733 +#include <linux/grinternal.h>
55734 +
55735 +int
55736 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55737 + const time_t shm_createtime, const uid_t cuid, const int shmid)
55738 +{
55739 + struct task_struct *task;
55740 +
55741 + if (!gr_acl_is_enabled())
55742 + return 1;
55743 +
55744 + rcu_read_lock();
55745 + read_lock(&tasklist_lock);
55746 +
55747 + task = find_task_by_vpid(shm_cprid);
55748 +
55749 + if (unlikely(!task))
55750 + task = find_task_by_vpid(shm_lapid);
55751 +
55752 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
55753 + (task->pid == shm_lapid)) &&
55754 + (task->acl->mode & GR_PROTSHM) &&
55755 + (task->acl != current->acl))) {
55756 + read_unlock(&tasklist_lock);
55757 + rcu_read_unlock();
55758 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
55759 + return 0;
55760 + }
55761 + read_unlock(&tasklist_lock);
55762 + rcu_read_unlock();
55763 +
55764 + return 1;
55765 +}
55766 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
55767 new file mode 100644
55768 index 0000000..bc0be01
55769 --- /dev/null
55770 +++ b/grsecurity/grsec_chdir.c
55771 @@ -0,0 +1,19 @@
55772 +#include <linux/kernel.h>
55773 +#include <linux/sched.h>
55774 +#include <linux/fs.h>
55775 +#include <linux/file.h>
55776 +#include <linux/grsecurity.h>
55777 +#include <linux/grinternal.h>
55778 +
55779 +void
55780 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
55781 +{
55782 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55783 + if ((grsec_enable_chdir && grsec_enable_group &&
55784 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
55785 + !grsec_enable_group)) {
55786 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
55787 + }
55788 +#endif
55789 + return;
55790 +}
55791 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
55792 new file mode 100644
55793 index 0000000..a2dc675
55794 --- /dev/null
55795 +++ b/grsecurity/grsec_chroot.c
55796 @@ -0,0 +1,351 @@
55797 +#include <linux/kernel.h>
55798 +#include <linux/module.h>
55799 +#include <linux/sched.h>
55800 +#include <linux/file.h>
55801 +#include <linux/fs.h>
55802 +#include <linux/mount.h>
55803 +#include <linux/types.h>
55804 +#include <linux/pid_namespace.h>
55805 +#include <linux/grsecurity.h>
55806 +#include <linux/grinternal.h>
55807 +
55808 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
55809 +{
55810 +#ifdef CONFIG_GRKERNSEC
55811 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
55812 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
55813 + task->gr_is_chrooted = 1;
55814 + else
55815 + task->gr_is_chrooted = 0;
55816 +
55817 + task->gr_chroot_dentry = path->dentry;
55818 +#endif
55819 + return;
55820 +}
55821 +
55822 +void gr_clear_chroot_entries(struct task_struct *task)
55823 +{
55824 +#ifdef CONFIG_GRKERNSEC
55825 + task->gr_is_chrooted = 0;
55826 + task->gr_chroot_dentry = NULL;
55827 +#endif
55828 + return;
55829 +}
55830 +
55831 +int
55832 +gr_handle_chroot_unix(const pid_t pid)
55833 +{
55834 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55835 + struct task_struct *p;
55836 +
55837 + if (unlikely(!grsec_enable_chroot_unix))
55838 + return 1;
55839 +
55840 + if (likely(!proc_is_chrooted(current)))
55841 + return 1;
55842 +
55843 + rcu_read_lock();
55844 + read_lock(&tasklist_lock);
55845 + p = find_task_by_vpid_unrestricted(pid);
55846 + if (unlikely(p && !have_same_root(current, p))) {
55847 + read_unlock(&tasklist_lock);
55848 + rcu_read_unlock();
55849 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
55850 + return 0;
55851 + }
55852 + read_unlock(&tasklist_lock);
55853 + rcu_read_unlock();
55854 +#endif
55855 + return 1;
55856 +}
55857 +
55858 +int
55859 +gr_handle_chroot_nice(void)
55860 +{
55861 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55862 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
55863 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
55864 + return -EPERM;
55865 + }
55866 +#endif
55867 + return 0;
55868 +}
55869 +
55870 +int
55871 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
55872 +{
55873 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55874 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
55875 + && proc_is_chrooted(current)) {
55876 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
55877 + return -EACCES;
55878 + }
55879 +#endif
55880 + return 0;
55881 +}
55882 +
55883 +int
55884 +gr_handle_chroot_rawio(const struct inode *inode)
55885 +{
55886 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55887 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
55888 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
55889 + return 1;
55890 +#endif
55891 + return 0;
55892 +}
55893 +
55894 +int
55895 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
55896 +{
55897 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55898 + struct task_struct *p;
55899 + int ret = 0;
55900 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
55901 + return ret;
55902 +
55903 + read_lock(&tasklist_lock);
55904 + do_each_pid_task(pid, type, p) {
55905 + if (!have_same_root(current, p)) {
55906 + ret = 1;
55907 + goto out;
55908 + }
55909 + } while_each_pid_task(pid, type, p);
55910 +out:
55911 + read_unlock(&tasklist_lock);
55912 + return ret;
55913 +#endif
55914 + return 0;
55915 +}
55916 +
55917 +int
55918 +gr_pid_is_chrooted(struct task_struct *p)
55919 +{
55920 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55921 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
55922 + return 0;
55923 +
55924 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
55925 + !have_same_root(current, p)) {
55926 + return 1;
55927 + }
55928 +#endif
55929 + return 0;
55930 +}
55931 +
55932 +EXPORT_SYMBOL(gr_pid_is_chrooted);
55933 +
55934 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
55935 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
55936 +{
55937 + struct path path, currentroot;
55938 + int ret = 0;
55939 +
55940 + path.dentry = (struct dentry *)u_dentry;
55941 + path.mnt = (struct vfsmount *)u_mnt;
55942 + get_fs_root(current->fs, &currentroot);
55943 + if (path_is_under(&path, &currentroot))
55944 + ret = 1;
55945 + path_put(&currentroot);
55946 +
55947 + return ret;
55948 +}
55949 +#endif
55950 +
55951 +int
55952 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
55953 +{
55954 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55955 + if (!grsec_enable_chroot_fchdir)
55956 + return 1;
55957 +
55958 + if (!proc_is_chrooted(current))
55959 + return 1;
55960 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
55961 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
55962 + return 0;
55963 + }
55964 +#endif
55965 + return 1;
55966 +}
55967 +
55968 +int
55969 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
55970 + const time_t shm_createtime)
55971 +{
55972 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55973 + struct task_struct *p;
55974 + time_t starttime;
55975 +
55976 + if (unlikely(!grsec_enable_chroot_shmat))
55977 + return 1;
55978 +
55979 + if (likely(!proc_is_chrooted(current)))
55980 + return 1;
55981 +
55982 + rcu_read_lock();
55983 + read_lock(&tasklist_lock);
55984 +
55985 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
55986 + starttime = p->start_time.tv_sec;
55987 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
55988 + if (have_same_root(current, p)) {
55989 + goto allow;
55990 + } else {
55991 + read_unlock(&tasklist_lock);
55992 + rcu_read_unlock();
55993 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
55994 + return 0;
55995 + }
55996 + }
55997 + /* creator exited, pid reuse, fall through to next check */
55998 + }
55999 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56000 + if (unlikely(!have_same_root(current, p))) {
56001 + read_unlock(&tasklist_lock);
56002 + rcu_read_unlock();
56003 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56004 + return 0;
56005 + }
56006 + }
56007 +
56008 +allow:
56009 + read_unlock(&tasklist_lock);
56010 + rcu_read_unlock();
56011 +#endif
56012 + return 1;
56013 +}
56014 +
56015 +void
56016 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56017 +{
56018 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56019 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56020 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56021 +#endif
56022 + return;
56023 +}
56024 +
56025 +int
56026 +gr_handle_chroot_mknod(const struct dentry *dentry,
56027 + const struct vfsmount *mnt, const int mode)
56028 +{
56029 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56030 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56031 + proc_is_chrooted(current)) {
56032 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56033 + return -EPERM;
56034 + }
56035 +#endif
56036 + return 0;
56037 +}
56038 +
56039 +int
56040 +gr_handle_chroot_mount(const struct dentry *dentry,
56041 + const struct vfsmount *mnt, const char *dev_name)
56042 +{
56043 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56044 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56045 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56046 + return -EPERM;
56047 + }
56048 +#endif
56049 + return 0;
56050 +}
56051 +
56052 +int
56053 +gr_handle_chroot_pivot(void)
56054 +{
56055 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56056 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56057 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56058 + return -EPERM;
56059 + }
56060 +#endif
56061 + return 0;
56062 +}
56063 +
56064 +int
56065 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56066 +{
56067 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56068 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56069 + !gr_is_outside_chroot(dentry, mnt)) {
56070 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56071 + return -EPERM;
56072 + }
56073 +#endif
56074 + return 0;
56075 +}
56076 +
56077 +extern const char *captab_log[];
56078 +extern int captab_log_entries;
56079 +
56080 +int
56081 +gr_chroot_is_capable(const int cap)
56082 +{
56083 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56084 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56085 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56086 + if (cap_raised(chroot_caps, cap)) {
56087 + const struct cred *creds = current_cred();
56088 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
56089 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
56090 + }
56091 + return 0;
56092 + }
56093 + }
56094 +#endif
56095 + return 1;
56096 +}
56097 +
56098 +int
56099 +gr_chroot_is_capable_nolog(const int cap)
56100 +{
56101 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56102 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56103 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56104 + if (cap_raised(chroot_caps, cap)) {
56105 + return 0;
56106 + }
56107 + }
56108 +#endif
56109 + return 1;
56110 +}
56111 +
56112 +int
56113 +gr_handle_chroot_sysctl(const int op)
56114 +{
56115 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56116 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56117 + proc_is_chrooted(current))
56118 + return -EACCES;
56119 +#endif
56120 + return 0;
56121 +}
56122 +
56123 +void
56124 +gr_handle_chroot_chdir(struct path *path)
56125 +{
56126 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56127 + if (grsec_enable_chroot_chdir)
56128 + set_fs_pwd(current->fs, path);
56129 +#endif
56130 + return;
56131 +}
56132 +
56133 +int
56134 +gr_handle_chroot_chmod(const struct dentry *dentry,
56135 + const struct vfsmount *mnt, const int mode)
56136 +{
56137 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56138 + /* allow chmod +s on directories, but not files */
56139 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56140 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56141 + proc_is_chrooted(current)) {
56142 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56143 + return -EPERM;
56144 + }
56145 +#endif
56146 + return 0;
56147 +}
56148 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56149 new file mode 100644
56150 index 0000000..213ad8b
56151 --- /dev/null
56152 +++ b/grsecurity/grsec_disabled.c
56153 @@ -0,0 +1,437 @@
56154 +#include <linux/kernel.h>
56155 +#include <linux/module.h>
56156 +#include <linux/sched.h>
56157 +#include <linux/file.h>
56158 +#include <linux/fs.h>
56159 +#include <linux/kdev_t.h>
56160 +#include <linux/net.h>
56161 +#include <linux/in.h>
56162 +#include <linux/ip.h>
56163 +#include <linux/skbuff.h>
56164 +#include <linux/sysctl.h>
56165 +
56166 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56167 +void
56168 +pax_set_initial_flags(struct linux_binprm *bprm)
56169 +{
56170 + return;
56171 +}
56172 +#endif
56173 +
56174 +#ifdef CONFIG_SYSCTL
56175 +__u32
56176 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56177 +{
56178 + return 0;
56179 +}
56180 +#endif
56181 +
56182 +#ifdef CONFIG_TASKSTATS
56183 +int gr_is_taskstats_denied(int pid)
56184 +{
56185 + return 0;
56186 +}
56187 +#endif
56188 +
56189 +int
56190 +gr_acl_is_enabled(void)
56191 +{
56192 + return 0;
56193 +}
56194 +
56195 +void
56196 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56197 +{
56198 + return;
56199 +}
56200 +
56201 +int
56202 +gr_handle_rawio(const struct inode *inode)
56203 +{
56204 + return 0;
56205 +}
56206 +
56207 +void
56208 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56209 +{
56210 + return;
56211 +}
56212 +
56213 +int
56214 +gr_handle_ptrace(struct task_struct *task, const long request)
56215 +{
56216 + return 0;
56217 +}
56218 +
56219 +int
56220 +gr_handle_proc_ptrace(struct task_struct *task)
56221 +{
56222 + return 0;
56223 +}
56224 +
56225 +void
56226 +gr_learn_resource(const struct task_struct *task,
56227 + const int res, const unsigned long wanted, const int gt)
56228 +{
56229 + return;
56230 +}
56231 +
56232 +int
56233 +gr_set_acls(const int type)
56234 +{
56235 + return 0;
56236 +}
56237 +
56238 +int
56239 +gr_check_hidden_task(const struct task_struct *tsk)
56240 +{
56241 + return 0;
56242 +}
56243 +
56244 +int
56245 +gr_check_protected_task(const struct task_struct *task)
56246 +{
56247 + return 0;
56248 +}
56249 +
56250 +int
56251 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56252 +{
56253 + return 0;
56254 +}
56255 +
56256 +void
56257 +gr_copy_label(struct task_struct *tsk)
56258 +{
56259 + return;
56260 +}
56261 +
56262 +void
56263 +gr_set_pax_flags(struct task_struct *task)
56264 +{
56265 + return;
56266 +}
56267 +
56268 +int
56269 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56270 + const int unsafe_share)
56271 +{
56272 + return 0;
56273 +}
56274 +
56275 +void
56276 +gr_handle_delete(const ino_t ino, const dev_t dev)
56277 +{
56278 + return;
56279 +}
56280 +
56281 +void
56282 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56283 +{
56284 + return;
56285 +}
56286 +
56287 +void
56288 +gr_handle_crash(struct task_struct *task, const int sig)
56289 +{
56290 + return;
56291 +}
56292 +
56293 +int
56294 +gr_check_crash_exec(const struct file *filp)
56295 +{
56296 + return 0;
56297 +}
56298 +
56299 +int
56300 +gr_check_crash_uid(const uid_t uid)
56301 +{
56302 + return 0;
56303 +}
56304 +
56305 +void
56306 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56307 + struct dentry *old_dentry,
56308 + struct dentry *new_dentry,
56309 + struct vfsmount *mnt, const __u8 replace)
56310 +{
56311 + return;
56312 +}
56313 +
56314 +int
56315 +gr_search_socket(const int family, const int type, const int protocol)
56316 +{
56317 + return 1;
56318 +}
56319 +
56320 +int
56321 +gr_search_connectbind(const int mode, const struct socket *sock,
56322 + const struct sockaddr_in *addr)
56323 +{
56324 + return 0;
56325 +}
56326 +
56327 +void
56328 +gr_handle_alertkill(struct task_struct *task)
56329 +{
56330 + return;
56331 +}
56332 +
56333 +__u32
56334 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56335 +{
56336 + return 1;
56337 +}
56338 +
56339 +__u32
56340 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56341 + const struct vfsmount * mnt)
56342 +{
56343 + return 1;
56344 +}
56345 +
56346 +__u32
56347 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56348 + int acc_mode)
56349 +{
56350 + return 1;
56351 +}
56352 +
56353 +__u32
56354 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56355 +{
56356 + return 1;
56357 +}
56358 +
56359 +__u32
56360 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56361 +{
56362 + return 1;
56363 +}
56364 +
56365 +int
56366 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56367 + unsigned int *vm_flags)
56368 +{
56369 + return 1;
56370 +}
56371 +
56372 +__u32
56373 +gr_acl_handle_truncate(const struct dentry * dentry,
56374 + const struct vfsmount * mnt)
56375 +{
56376 + return 1;
56377 +}
56378 +
56379 +__u32
56380 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56381 +{
56382 + return 1;
56383 +}
56384 +
56385 +__u32
56386 +gr_acl_handle_access(const struct dentry * dentry,
56387 + const struct vfsmount * mnt, const int fmode)
56388 +{
56389 + return 1;
56390 +}
56391 +
56392 +__u32
56393 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56394 + umode_t *mode)
56395 +{
56396 + return 1;
56397 +}
56398 +
56399 +__u32
56400 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56401 +{
56402 + return 1;
56403 +}
56404 +
56405 +__u32
56406 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56407 +{
56408 + return 1;
56409 +}
56410 +
56411 +void
56412 +grsecurity_init(void)
56413 +{
56414 + return;
56415 +}
56416 +
56417 +umode_t gr_acl_umask(void)
56418 +{
56419 + return 0;
56420 +}
56421 +
56422 +__u32
56423 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56424 + const struct dentry * parent_dentry,
56425 + const struct vfsmount * parent_mnt,
56426 + const int mode)
56427 +{
56428 + return 1;
56429 +}
56430 +
56431 +__u32
56432 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
56433 + const struct dentry * parent_dentry,
56434 + const struct vfsmount * parent_mnt)
56435 +{
56436 + return 1;
56437 +}
56438 +
56439 +__u32
56440 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56441 + const struct dentry * parent_dentry,
56442 + const struct vfsmount * parent_mnt, const char *from)
56443 +{
56444 + return 1;
56445 +}
56446 +
56447 +__u32
56448 +gr_acl_handle_link(const struct dentry * new_dentry,
56449 + const struct dentry * parent_dentry,
56450 + const struct vfsmount * parent_mnt,
56451 + const struct dentry * old_dentry,
56452 + const struct vfsmount * old_mnt, const char *to)
56453 +{
56454 + return 1;
56455 +}
56456 +
56457 +int
56458 +gr_acl_handle_rename(const struct dentry *new_dentry,
56459 + const struct dentry *parent_dentry,
56460 + const struct vfsmount *parent_mnt,
56461 + const struct dentry *old_dentry,
56462 + const struct inode *old_parent_inode,
56463 + const struct vfsmount *old_mnt, const char *newname)
56464 +{
56465 + return 0;
56466 +}
56467 +
56468 +int
56469 +gr_acl_handle_filldir(const struct file *file, const char *name,
56470 + const int namelen, const ino_t ino)
56471 +{
56472 + return 1;
56473 +}
56474 +
56475 +int
56476 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56477 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56478 +{
56479 + return 1;
56480 +}
56481 +
56482 +int
56483 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56484 +{
56485 + return 0;
56486 +}
56487 +
56488 +int
56489 +gr_search_accept(const struct socket *sock)
56490 +{
56491 + return 0;
56492 +}
56493 +
56494 +int
56495 +gr_search_listen(const struct socket *sock)
56496 +{
56497 + return 0;
56498 +}
56499 +
56500 +int
56501 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56502 +{
56503 + return 0;
56504 +}
56505 +
56506 +__u32
56507 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56508 +{
56509 + return 1;
56510 +}
56511 +
56512 +__u32
56513 +gr_acl_handle_creat(const struct dentry * dentry,
56514 + const struct dentry * p_dentry,
56515 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56516 + const int imode)
56517 +{
56518 + return 1;
56519 +}
56520 +
56521 +void
56522 +gr_acl_handle_exit(void)
56523 +{
56524 + return;
56525 +}
56526 +
56527 +int
56528 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56529 +{
56530 + return 1;
56531 +}
56532 +
56533 +void
56534 +gr_set_role_label(const uid_t uid, const gid_t gid)
56535 +{
56536 + return;
56537 +}
56538 +
56539 +int
56540 +gr_acl_handle_procpidmem(const struct task_struct *task)
56541 +{
56542 + return 0;
56543 +}
56544 +
56545 +int
56546 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56547 +{
56548 + return 0;
56549 +}
56550 +
56551 +int
56552 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56553 +{
56554 + return 0;
56555 +}
56556 +
56557 +void
56558 +gr_set_kernel_label(struct task_struct *task)
56559 +{
56560 + return;
56561 +}
56562 +
56563 +int
56564 +gr_check_user_change(int real, int effective, int fs)
56565 +{
56566 + return 0;
56567 +}
56568 +
56569 +int
56570 +gr_check_group_change(int real, int effective, int fs)
56571 +{
56572 + return 0;
56573 +}
56574 +
56575 +int gr_acl_enable_at_secure(void)
56576 +{
56577 + return 0;
56578 +}
56579 +
56580 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56581 +{
56582 + return dentry->d_inode->i_sb->s_dev;
56583 +}
56584 +
56585 +EXPORT_SYMBOL(gr_learn_resource);
56586 +EXPORT_SYMBOL(gr_set_kernel_label);
56587 +#ifdef CONFIG_SECURITY
56588 +EXPORT_SYMBOL(gr_check_user_change);
56589 +EXPORT_SYMBOL(gr_check_group_change);
56590 +#endif
56591 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56592 new file mode 100644
56593 index 0000000..2b05ada
56594 --- /dev/null
56595 +++ b/grsecurity/grsec_exec.c
56596 @@ -0,0 +1,146 @@
56597 +#include <linux/kernel.h>
56598 +#include <linux/sched.h>
56599 +#include <linux/file.h>
56600 +#include <linux/binfmts.h>
56601 +#include <linux/fs.h>
56602 +#include <linux/types.h>
56603 +#include <linux/grdefs.h>
56604 +#include <linux/grsecurity.h>
56605 +#include <linux/grinternal.h>
56606 +#include <linux/capability.h>
56607 +#include <linux/module.h>
56608 +
56609 +#include <asm/uaccess.h>
56610 +
56611 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56612 +static char gr_exec_arg_buf[132];
56613 +static DEFINE_MUTEX(gr_exec_arg_mutex);
56614 +#endif
56615 +
56616 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56617 +
56618 +void
56619 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56620 +{
56621 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56622 + char *grarg = gr_exec_arg_buf;
56623 + unsigned int i, x, execlen = 0;
56624 + char c;
56625 +
56626 + if (!((grsec_enable_execlog && grsec_enable_group &&
56627 + in_group_p(grsec_audit_gid))
56628 + || (grsec_enable_execlog && !grsec_enable_group)))
56629 + return;
56630 +
56631 + mutex_lock(&gr_exec_arg_mutex);
56632 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
56633 +
56634 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
56635 + const char __user *p;
56636 + unsigned int len;
56637 +
56638 + p = get_user_arg_ptr(argv, i);
56639 + if (IS_ERR(p))
56640 + goto log;
56641 +
56642 + len = strnlen_user(p, 128 - execlen);
56643 + if (len > 128 - execlen)
56644 + len = 128 - execlen;
56645 + else if (len > 0)
56646 + len--;
56647 + if (copy_from_user(grarg + execlen, p, len))
56648 + goto log;
56649 +
56650 + /* rewrite unprintable characters */
56651 + for (x = 0; x < len; x++) {
56652 + c = *(grarg + execlen + x);
56653 + if (c < 32 || c > 126)
56654 + *(grarg + execlen + x) = ' ';
56655 + }
56656 +
56657 + execlen += len;
56658 + *(grarg + execlen) = ' ';
56659 + *(grarg + execlen + 1) = '\0';
56660 + execlen++;
56661 + }
56662 +
56663 + log:
56664 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
56665 + bprm->file->f_path.mnt, grarg);
56666 + mutex_unlock(&gr_exec_arg_mutex);
56667 +#endif
56668 + return;
56669 +}
56670 +
56671 +#ifdef CONFIG_GRKERNSEC
56672 +extern int gr_acl_is_capable(const int cap);
56673 +extern int gr_acl_is_capable_nolog(const int cap);
56674 +extern int gr_chroot_is_capable(const int cap);
56675 +extern int gr_chroot_is_capable_nolog(const int cap);
56676 +#endif
56677 +
56678 +const char *captab_log[] = {
56679 + "CAP_CHOWN",
56680 + "CAP_DAC_OVERRIDE",
56681 + "CAP_DAC_READ_SEARCH",
56682 + "CAP_FOWNER",
56683 + "CAP_FSETID",
56684 + "CAP_KILL",
56685 + "CAP_SETGID",
56686 + "CAP_SETUID",
56687 + "CAP_SETPCAP",
56688 + "CAP_LINUX_IMMUTABLE",
56689 + "CAP_NET_BIND_SERVICE",
56690 + "CAP_NET_BROADCAST",
56691 + "CAP_NET_ADMIN",
56692 + "CAP_NET_RAW",
56693 + "CAP_IPC_LOCK",
56694 + "CAP_IPC_OWNER",
56695 + "CAP_SYS_MODULE",
56696 + "CAP_SYS_RAWIO",
56697 + "CAP_SYS_CHROOT",
56698 + "CAP_SYS_PTRACE",
56699 + "CAP_SYS_PACCT",
56700 + "CAP_SYS_ADMIN",
56701 + "CAP_SYS_BOOT",
56702 + "CAP_SYS_NICE",
56703 + "CAP_SYS_RESOURCE",
56704 + "CAP_SYS_TIME",
56705 + "CAP_SYS_TTY_CONFIG",
56706 + "CAP_MKNOD",
56707 + "CAP_LEASE",
56708 + "CAP_AUDIT_WRITE",
56709 + "CAP_AUDIT_CONTROL",
56710 + "CAP_SETFCAP",
56711 + "CAP_MAC_OVERRIDE",
56712 + "CAP_MAC_ADMIN",
56713 + "CAP_SYSLOG",
56714 + "CAP_WAKE_ALARM"
56715 +};
56716 +
56717 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
56718 +
56719 +int gr_is_capable(const int cap)
56720 +{
56721 +#ifdef CONFIG_GRKERNSEC
56722 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
56723 + return 1;
56724 + return 0;
56725 +#else
56726 + return 1;
56727 +#endif
56728 +}
56729 +
56730 +int gr_is_capable_nolog(const int cap)
56731 +{
56732 +#ifdef CONFIG_GRKERNSEC
56733 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
56734 + return 1;
56735 + return 0;
56736 +#else
56737 + return 1;
56738 +#endif
56739 +}
56740 +
56741 +EXPORT_SYMBOL(gr_is_capable);
56742 +EXPORT_SYMBOL(gr_is_capable_nolog);
56743 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
56744 new file mode 100644
56745 index 0000000..d3ee748
56746 --- /dev/null
56747 +++ b/grsecurity/grsec_fifo.c
56748 @@ -0,0 +1,24 @@
56749 +#include <linux/kernel.h>
56750 +#include <linux/sched.h>
56751 +#include <linux/fs.h>
56752 +#include <linux/file.h>
56753 +#include <linux/grinternal.h>
56754 +
56755 +int
56756 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
56757 + const struct dentry *dir, const int flag, const int acc_mode)
56758 +{
56759 +#ifdef CONFIG_GRKERNSEC_FIFO
56760 + const struct cred *cred = current_cred();
56761 +
56762 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
56763 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
56764 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
56765 + (cred->fsuid != dentry->d_inode->i_uid)) {
56766 + if (!inode_permission(dentry->d_inode, acc_mode))
56767 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
56768 + return -EACCES;
56769 + }
56770 +#endif
56771 + return 0;
56772 +}
56773 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
56774 new file mode 100644
56775 index 0000000..8ca18bf
56776 --- /dev/null
56777 +++ b/grsecurity/grsec_fork.c
56778 @@ -0,0 +1,23 @@
56779 +#include <linux/kernel.h>
56780 +#include <linux/sched.h>
56781 +#include <linux/grsecurity.h>
56782 +#include <linux/grinternal.h>
56783 +#include <linux/errno.h>
56784 +
56785 +void
56786 +gr_log_forkfail(const int retval)
56787 +{
56788 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
56789 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
56790 + switch (retval) {
56791 + case -EAGAIN:
56792 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
56793 + break;
56794 + case -ENOMEM:
56795 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
56796 + break;
56797 + }
56798 + }
56799 +#endif
56800 + return;
56801 +}
56802 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
56803 new file mode 100644
56804 index 0000000..01ddde4
56805 --- /dev/null
56806 +++ b/grsecurity/grsec_init.c
56807 @@ -0,0 +1,277 @@
56808 +#include <linux/kernel.h>
56809 +#include <linux/sched.h>
56810 +#include <linux/mm.h>
56811 +#include <linux/gracl.h>
56812 +#include <linux/slab.h>
56813 +#include <linux/vmalloc.h>
56814 +#include <linux/percpu.h>
56815 +#include <linux/module.h>
56816 +
56817 +int grsec_enable_ptrace_readexec;
56818 +int grsec_enable_setxid;
56819 +int grsec_enable_brute;
56820 +int grsec_enable_link;
56821 +int grsec_enable_dmesg;
56822 +int grsec_enable_harden_ptrace;
56823 +int grsec_enable_fifo;
56824 +int grsec_enable_execlog;
56825 +int grsec_enable_signal;
56826 +int grsec_enable_forkfail;
56827 +int grsec_enable_audit_ptrace;
56828 +int grsec_enable_time;
56829 +int grsec_enable_audit_textrel;
56830 +int grsec_enable_group;
56831 +int grsec_audit_gid;
56832 +int grsec_enable_chdir;
56833 +int grsec_enable_mount;
56834 +int grsec_enable_rofs;
56835 +int grsec_enable_chroot_findtask;
56836 +int grsec_enable_chroot_mount;
56837 +int grsec_enable_chroot_shmat;
56838 +int grsec_enable_chroot_fchdir;
56839 +int grsec_enable_chroot_double;
56840 +int grsec_enable_chroot_pivot;
56841 +int grsec_enable_chroot_chdir;
56842 +int grsec_enable_chroot_chmod;
56843 +int grsec_enable_chroot_mknod;
56844 +int grsec_enable_chroot_nice;
56845 +int grsec_enable_chroot_execlog;
56846 +int grsec_enable_chroot_caps;
56847 +int grsec_enable_chroot_sysctl;
56848 +int grsec_enable_chroot_unix;
56849 +int grsec_enable_tpe;
56850 +int grsec_tpe_gid;
56851 +int grsec_enable_blackhole;
56852 +#ifdef CONFIG_IPV6_MODULE
56853 +EXPORT_SYMBOL(grsec_enable_blackhole);
56854 +#endif
56855 +int grsec_lastack_retries;
56856 +int grsec_enable_tpe_all;
56857 +int grsec_enable_tpe_invert;
56858 +int grsec_enable_socket_all;
56859 +int grsec_socket_all_gid;
56860 +int grsec_enable_socket_client;
56861 +int grsec_socket_client_gid;
56862 +int grsec_enable_socket_server;
56863 +int grsec_socket_server_gid;
56864 +int grsec_resource_logging;
56865 +int grsec_disable_privio;
56866 +int grsec_enable_log_rwxmaps;
56867 +int grsec_lock;
56868 +
56869 +DEFINE_SPINLOCK(grsec_alert_lock);
56870 +unsigned long grsec_alert_wtime = 0;
56871 +unsigned long grsec_alert_fyet = 0;
56872 +
56873 +DEFINE_SPINLOCK(grsec_audit_lock);
56874 +
56875 +DEFINE_RWLOCK(grsec_exec_file_lock);
56876 +
56877 +char *gr_shared_page[4];
56878 +
56879 +char *gr_alert_log_fmt;
56880 +char *gr_audit_log_fmt;
56881 +char *gr_alert_log_buf;
56882 +char *gr_audit_log_buf;
56883 +
56884 +extern struct gr_arg *gr_usermode;
56885 +extern unsigned char *gr_system_salt;
56886 +extern unsigned char *gr_system_sum;
56887 +
56888 +void __init
56889 +grsecurity_init(void)
56890 +{
56891 + int j;
56892 + /* create the per-cpu shared pages */
56893 +
56894 +#ifdef CONFIG_X86
56895 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
56896 +#endif
56897 +
56898 + for (j = 0; j < 4; j++) {
56899 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
56900 + if (gr_shared_page[j] == NULL) {
56901 + panic("Unable to allocate grsecurity shared page");
56902 + return;
56903 + }
56904 + }
56905 +
56906 + /* allocate log buffers */
56907 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
56908 + if (!gr_alert_log_fmt) {
56909 + panic("Unable to allocate grsecurity alert log format buffer");
56910 + return;
56911 + }
56912 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
56913 + if (!gr_audit_log_fmt) {
56914 + panic("Unable to allocate grsecurity audit log format buffer");
56915 + return;
56916 + }
56917 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56918 + if (!gr_alert_log_buf) {
56919 + panic("Unable to allocate grsecurity alert log buffer");
56920 + return;
56921 + }
56922 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
56923 + if (!gr_audit_log_buf) {
56924 + panic("Unable to allocate grsecurity audit log buffer");
56925 + return;
56926 + }
56927 +
56928 + /* allocate memory for authentication structure */
56929 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
56930 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
56931 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
56932 +
56933 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
56934 + panic("Unable to allocate grsecurity authentication structure");
56935 + return;
56936 + }
56937 +
56938 +
56939 +#ifdef CONFIG_GRKERNSEC_IO
56940 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
56941 + grsec_disable_privio = 1;
56942 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56943 + grsec_disable_privio = 1;
56944 +#else
56945 + grsec_disable_privio = 0;
56946 +#endif
56947 +#endif
56948 +
56949 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56950 + /* for backward compatibility, tpe_invert always defaults to on if
56951 + enabled in the kernel
56952 + */
56953 + grsec_enable_tpe_invert = 1;
56954 +#endif
56955 +
56956 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
56957 +#ifndef CONFIG_GRKERNSEC_SYSCTL
56958 + grsec_lock = 1;
56959 +#endif
56960 +
56961 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56962 + grsec_enable_audit_textrel = 1;
56963 +#endif
56964 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56965 + grsec_enable_log_rwxmaps = 1;
56966 +#endif
56967 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56968 + grsec_enable_group = 1;
56969 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
56970 +#endif
56971 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56972 + grsec_enable_ptrace_readexec = 1;
56973 +#endif
56974 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56975 + grsec_enable_chdir = 1;
56976 +#endif
56977 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56978 + grsec_enable_harden_ptrace = 1;
56979 +#endif
56980 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56981 + grsec_enable_mount = 1;
56982 +#endif
56983 +#ifdef CONFIG_GRKERNSEC_LINK
56984 + grsec_enable_link = 1;
56985 +#endif
56986 +#ifdef CONFIG_GRKERNSEC_BRUTE
56987 + grsec_enable_brute = 1;
56988 +#endif
56989 +#ifdef CONFIG_GRKERNSEC_DMESG
56990 + grsec_enable_dmesg = 1;
56991 +#endif
56992 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56993 + grsec_enable_blackhole = 1;
56994 + grsec_lastack_retries = 4;
56995 +#endif
56996 +#ifdef CONFIG_GRKERNSEC_FIFO
56997 + grsec_enable_fifo = 1;
56998 +#endif
56999 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57000 + grsec_enable_execlog = 1;
57001 +#endif
57002 +#ifdef CONFIG_GRKERNSEC_SETXID
57003 + grsec_enable_setxid = 1;
57004 +#endif
57005 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57006 + grsec_enable_signal = 1;
57007 +#endif
57008 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57009 + grsec_enable_forkfail = 1;
57010 +#endif
57011 +#ifdef CONFIG_GRKERNSEC_TIME
57012 + grsec_enable_time = 1;
57013 +#endif
57014 +#ifdef CONFIG_GRKERNSEC_RESLOG
57015 + grsec_resource_logging = 1;
57016 +#endif
57017 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57018 + grsec_enable_chroot_findtask = 1;
57019 +#endif
57020 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57021 + grsec_enable_chroot_unix = 1;
57022 +#endif
57023 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57024 + grsec_enable_chroot_mount = 1;
57025 +#endif
57026 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57027 + grsec_enable_chroot_fchdir = 1;
57028 +#endif
57029 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57030 + grsec_enable_chroot_shmat = 1;
57031 +#endif
57032 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57033 + grsec_enable_audit_ptrace = 1;
57034 +#endif
57035 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57036 + grsec_enable_chroot_double = 1;
57037 +#endif
57038 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57039 + grsec_enable_chroot_pivot = 1;
57040 +#endif
57041 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57042 + grsec_enable_chroot_chdir = 1;
57043 +#endif
57044 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57045 + grsec_enable_chroot_chmod = 1;
57046 +#endif
57047 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57048 + grsec_enable_chroot_mknod = 1;
57049 +#endif
57050 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57051 + grsec_enable_chroot_nice = 1;
57052 +#endif
57053 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57054 + grsec_enable_chroot_execlog = 1;
57055 +#endif
57056 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57057 + grsec_enable_chroot_caps = 1;
57058 +#endif
57059 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57060 + grsec_enable_chroot_sysctl = 1;
57061 +#endif
57062 +#ifdef CONFIG_GRKERNSEC_TPE
57063 + grsec_enable_tpe = 1;
57064 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57065 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57066 + grsec_enable_tpe_all = 1;
57067 +#endif
57068 +#endif
57069 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57070 + grsec_enable_socket_all = 1;
57071 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57072 +#endif
57073 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57074 + grsec_enable_socket_client = 1;
57075 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57076 +#endif
57077 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57078 + grsec_enable_socket_server = 1;
57079 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57080 +#endif
57081 +#endif
57082 +
57083 + return;
57084 +}
57085 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57086 new file mode 100644
57087 index 0000000..3efe141
57088 --- /dev/null
57089 +++ b/grsecurity/grsec_link.c
57090 @@ -0,0 +1,43 @@
57091 +#include <linux/kernel.h>
57092 +#include <linux/sched.h>
57093 +#include <linux/fs.h>
57094 +#include <linux/file.h>
57095 +#include <linux/grinternal.h>
57096 +
57097 +int
57098 +gr_handle_follow_link(const struct inode *parent,
57099 + const struct inode *inode,
57100 + const struct dentry *dentry, const struct vfsmount *mnt)
57101 +{
57102 +#ifdef CONFIG_GRKERNSEC_LINK
57103 + const struct cred *cred = current_cred();
57104 +
57105 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57106 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57107 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57108 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57109 + return -EACCES;
57110 + }
57111 +#endif
57112 + return 0;
57113 +}
57114 +
57115 +int
57116 +gr_handle_hardlink(const struct dentry *dentry,
57117 + const struct vfsmount *mnt,
57118 + struct inode *inode, const int mode, const char *to)
57119 +{
57120 +#ifdef CONFIG_GRKERNSEC_LINK
57121 + const struct cred *cred = current_cred();
57122 +
57123 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57124 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57125 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57126 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57127 + !capable(CAP_FOWNER) && cred->uid) {
57128 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57129 + return -EPERM;
57130 + }
57131 +#endif
57132 + return 0;
57133 +}
57134 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57135 new file mode 100644
57136 index 0000000..a45d2e9
57137 --- /dev/null
57138 +++ b/grsecurity/grsec_log.c
57139 @@ -0,0 +1,322 @@
57140 +#include <linux/kernel.h>
57141 +#include <linux/sched.h>
57142 +#include <linux/file.h>
57143 +#include <linux/tty.h>
57144 +#include <linux/fs.h>
57145 +#include <linux/grinternal.h>
57146 +
57147 +#ifdef CONFIG_TREE_PREEMPT_RCU
57148 +#define DISABLE_PREEMPT() preempt_disable()
57149 +#define ENABLE_PREEMPT() preempt_enable()
57150 +#else
57151 +#define DISABLE_PREEMPT()
57152 +#define ENABLE_PREEMPT()
57153 +#endif
57154 +
57155 +#define BEGIN_LOCKS(x) \
57156 + DISABLE_PREEMPT(); \
57157 + rcu_read_lock(); \
57158 + read_lock(&tasklist_lock); \
57159 + read_lock(&grsec_exec_file_lock); \
57160 + if (x != GR_DO_AUDIT) \
57161 + spin_lock(&grsec_alert_lock); \
57162 + else \
57163 + spin_lock(&grsec_audit_lock)
57164 +
57165 +#define END_LOCKS(x) \
57166 + if (x != GR_DO_AUDIT) \
57167 + spin_unlock(&grsec_alert_lock); \
57168 + else \
57169 + spin_unlock(&grsec_audit_lock); \
57170 + read_unlock(&grsec_exec_file_lock); \
57171 + read_unlock(&tasklist_lock); \
57172 + rcu_read_unlock(); \
57173 + ENABLE_PREEMPT(); \
57174 + if (x == GR_DONT_AUDIT) \
57175 + gr_handle_alertkill(current)
57176 +
57177 +enum {
57178 + FLOODING,
57179 + NO_FLOODING
57180 +};
57181 +
57182 +extern char *gr_alert_log_fmt;
57183 +extern char *gr_audit_log_fmt;
57184 +extern char *gr_alert_log_buf;
57185 +extern char *gr_audit_log_buf;
57186 +
57187 +static int gr_log_start(int audit)
57188 +{
57189 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57190 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57191 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57192 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57193 + unsigned long curr_secs = get_seconds();
57194 +
57195 + if (audit == GR_DO_AUDIT)
57196 + goto set_fmt;
57197 +
57198 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57199 + grsec_alert_wtime = curr_secs;
57200 + grsec_alert_fyet = 0;
57201 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57202 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57203 + grsec_alert_fyet++;
57204 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57205 + grsec_alert_wtime = curr_secs;
57206 + grsec_alert_fyet++;
57207 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57208 + return FLOODING;
57209 + }
57210 + else return FLOODING;
57211 +
57212 +set_fmt:
57213 +#endif
57214 + memset(buf, 0, PAGE_SIZE);
57215 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57216 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57217 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57218 + } else if (current->signal->curr_ip) {
57219 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57220 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57221 + } else if (gr_acl_is_enabled()) {
57222 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57223 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57224 + } else {
57225 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57226 + strcpy(buf, fmt);
57227 + }
57228 +
57229 + return NO_FLOODING;
57230 +}
57231 +
57232 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57233 + __attribute__ ((format (printf, 2, 0)));
57234 +
57235 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57236 +{
57237 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57238 + unsigned int len = strlen(buf);
57239 +
57240 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57241 +
57242 + return;
57243 +}
57244 +
57245 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57246 + __attribute__ ((format (printf, 2, 3)));
57247 +
57248 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57249 +{
57250 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57251 + unsigned int len = strlen(buf);
57252 + va_list ap;
57253 +
57254 + va_start(ap, msg);
57255 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57256 + va_end(ap);
57257 +
57258 + return;
57259 +}
57260 +
57261 +static void gr_log_end(int audit, int append_default)
57262 +{
57263 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57264 +
57265 + if (append_default) {
57266 + unsigned int len = strlen(buf);
57267 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57268 + }
57269 +
57270 + printk("%s\n", buf);
57271 +
57272 + return;
57273 +}
57274 +
57275 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57276 +{
57277 + int logtype;
57278 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57279 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57280 + void *voidptr = NULL;
57281 + int num1 = 0, num2 = 0;
57282 + unsigned long ulong1 = 0, ulong2 = 0;
57283 + struct dentry *dentry = NULL;
57284 + struct vfsmount *mnt = NULL;
57285 + struct file *file = NULL;
57286 + struct task_struct *task = NULL;
57287 + const struct cred *cred, *pcred;
57288 + va_list ap;
57289 +
57290 + BEGIN_LOCKS(audit);
57291 + logtype = gr_log_start(audit);
57292 + if (logtype == FLOODING) {
57293 + END_LOCKS(audit);
57294 + return;
57295 + }
57296 + va_start(ap, argtypes);
57297 + switch (argtypes) {
57298 + case GR_TTYSNIFF:
57299 + task = va_arg(ap, struct task_struct *);
57300 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57301 + break;
57302 + case GR_SYSCTL_HIDDEN:
57303 + str1 = va_arg(ap, char *);
57304 + gr_log_middle_varargs(audit, msg, result, str1);
57305 + break;
57306 + case GR_RBAC:
57307 + dentry = va_arg(ap, struct dentry *);
57308 + mnt = va_arg(ap, struct vfsmount *);
57309 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57310 + break;
57311 + case GR_RBAC_STR:
57312 + dentry = va_arg(ap, struct dentry *);
57313 + mnt = va_arg(ap, struct vfsmount *);
57314 + str1 = va_arg(ap, char *);
57315 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57316 + break;
57317 + case GR_STR_RBAC:
57318 + str1 = va_arg(ap, char *);
57319 + dentry = va_arg(ap, struct dentry *);
57320 + mnt = va_arg(ap, struct vfsmount *);
57321 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57322 + break;
57323 + case GR_RBAC_MODE2:
57324 + dentry = va_arg(ap, struct dentry *);
57325 + mnt = va_arg(ap, struct vfsmount *);
57326 + str1 = va_arg(ap, char *);
57327 + str2 = va_arg(ap, char *);
57328 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57329 + break;
57330 + case GR_RBAC_MODE3:
57331 + dentry = va_arg(ap, struct dentry *);
57332 + mnt = va_arg(ap, struct vfsmount *);
57333 + str1 = va_arg(ap, char *);
57334 + str2 = va_arg(ap, char *);
57335 + str3 = va_arg(ap, char *);
57336 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57337 + break;
57338 + case GR_FILENAME:
57339 + dentry = va_arg(ap, struct dentry *);
57340 + mnt = va_arg(ap, struct vfsmount *);
57341 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57342 + break;
57343 + case GR_STR_FILENAME:
57344 + str1 = va_arg(ap, char *);
57345 + dentry = va_arg(ap, struct dentry *);
57346 + mnt = va_arg(ap, struct vfsmount *);
57347 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57348 + break;
57349 + case GR_FILENAME_STR:
57350 + dentry = va_arg(ap, struct dentry *);
57351 + mnt = va_arg(ap, struct vfsmount *);
57352 + str1 = va_arg(ap, char *);
57353 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57354 + break;
57355 + case GR_FILENAME_TWO_INT:
57356 + dentry = va_arg(ap, struct dentry *);
57357 + mnt = va_arg(ap, struct vfsmount *);
57358 + num1 = va_arg(ap, int);
57359 + num2 = va_arg(ap, int);
57360 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57361 + break;
57362 + case GR_FILENAME_TWO_INT_STR:
57363 + dentry = va_arg(ap, struct dentry *);
57364 + mnt = va_arg(ap, struct vfsmount *);
57365 + num1 = va_arg(ap, int);
57366 + num2 = va_arg(ap, int);
57367 + str1 = va_arg(ap, char *);
57368 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57369 + break;
57370 + case GR_TEXTREL:
57371 + file = va_arg(ap, struct file *);
57372 + ulong1 = va_arg(ap, unsigned long);
57373 + ulong2 = va_arg(ap, unsigned long);
57374 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57375 + break;
57376 + case GR_PTRACE:
57377 + task = va_arg(ap, struct task_struct *);
57378 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57379 + break;
57380 + case GR_RESOURCE:
57381 + task = va_arg(ap, struct task_struct *);
57382 + cred = __task_cred(task);
57383 + pcred = __task_cred(task->real_parent);
57384 + ulong1 = va_arg(ap, unsigned long);
57385 + str1 = va_arg(ap, char *);
57386 + ulong2 = va_arg(ap, unsigned long);
57387 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57388 + break;
57389 + case GR_CAP:
57390 + task = va_arg(ap, struct task_struct *);
57391 + cred = __task_cred(task);
57392 + pcred = __task_cred(task->real_parent);
57393 + str1 = va_arg(ap, char *);
57394 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57395 + break;
57396 + case GR_SIG:
57397 + str1 = va_arg(ap, char *);
57398 + voidptr = va_arg(ap, void *);
57399 + gr_log_middle_varargs(audit, msg, str1, voidptr);
57400 + break;
57401 + case GR_SIG2:
57402 + task = va_arg(ap, struct task_struct *);
57403 + cred = __task_cred(task);
57404 + pcred = __task_cred(task->real_parent);
57405 + num1 = va_arg(ap, int);
57406 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57407 + break;
57408 + case GR_CRASH1:
57409 + task = va_arg(ap, struct task_struct *);
57410 + cred = __task_cred(task);
57411 + pcred = __task_cred(task->real_parent);
57412 + ulong1 = va_arg(ap, unsigned long);
57413 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57414 + break;
57415 + case GR_CRASH2:
57416 + task = va_arg(ap, struct task_struct *);
57417 + cred = __task_cred(task);
57418 + pcred = __task_cred(task->real_parent);
57419 + ulong1 = va_arg(ap, unsigned long);
57420 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57421 + break;
57422 + case GR_RWXMAP:
57423 + file = va_arg(ap, struct file *);
57424 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57425 + break;
57426 + case GR_PSACCT:
57427 + {
57428 + unsigned int wday, cday;
57429 + __u8 whr, chr;
57430 + __u8 wmin, cmin;
57431 + __u8 wsec, csec;
57432 + char cur_tty[64] = { 0 };
57433 + char parent_tty[64] = { 0 };
57434 +
57435 + task = va_arg(ap, struct task_struct *);
57436 + wday = va_arg(ap, unsigned int);
57437 + cday = va_arg(ap, unsigned int);
57438 + whr = va_arg(ap, int);
57439 + chr = va_arg(ap, int);
57440 + wmin = va_arg(ap, int);
57441 + cmin = va_arg(ap, int);
57442 + wsec = va_arg(ap, int);
57443 + csec = va_arg(ap, int);
57444 + ulong1 = va_arg(ap, unsigned long);
57445 + cred = __task_cred(task);
57446 + pcred = __task_cred(task->real_parent);
57447 +
57448 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57449 + }
57450 + break;
57451 + default:
57452 + gr_log_middle(audit, msg, ap);
57453 + }
57454 + va_end(ap);
57455 + // these don't need DEFAULTSECARGS printed on the end
57456 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57457 + gr_log_end(audit, 0);
57458 + else
57459 + gr_log_end(audit, 1);
57460 + END_LOCKS(audit);
57461 +}
57462 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57463 new file mode 100644
57464 index 0000000..f536303
57465 --- /dev/null
57466 +++ b/grsecurity/grsec_mem.c
57467 @@ -0,0 +1,40 @@
57468 +#include <linux/kernel.h>
57469 +#include <linux/sched.h>
57470 +#include <linux/mm.h>
57471 +#include <linux/mman.h>
57472 +#include <linux/grinternal.h>
57473 +
57474 +void
57475 +gr_handle_ioperm(void)
57476 +{
57477 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57478 + return;
57479 +}
57480 +
57481 +void
57482 +gr_handle_iopl(void)
57483 +{
57484 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57485 + return;
57486 +}
57487 +
57488 +void
57489 +gr_handle_mem_readwrite(u64 from, u64 to)
57490 +{
57491 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57492 + return;
57493 +}
57494 +
57495 +void
57496 +gr_handle_vm86(void)
57497 +{
57498 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57499 + return;
57500 +}
57501 +
57502 +void
57503 +gr_log_badprocpid(const char *entry)
57504 +{
57505 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57506 + return;
57507 +}
57508 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57509 new file mode 100644
57510 index 0000000..2131422
57511 --- /dev/null
57512 +++ b/grsecurity/grsec_mount.c
57513 @@ -0,0 +1,62 @@
57514 +#include <linux/kernel.h>
57515 +#include <linux/sched.h>
57516 +#include <linux/mount.h>
57517 +#include <linux/grsecurity.h>
57518 +#include <linux/grinternal.h>
57519 +
57520 +void
57521 +gr_log_remount(const char *devname, const int retval)
57522 +{
57523 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57524 + if (grsec_enable_mount && (retval >= 0))
57525 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57526 +#endif
57527 + return;
57528 +}
57529 +
57530 +void
57531 +gr_log_unmount(const char *devname, const int retval)
57532 +{
57533 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57534 + if (grsec_enable_mount && (retval >= 0))
57535 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57536 +#endif
57537 + return;
57538 +}
57539 +
57540 +void
57541 +gr_log_mount(const char *from, const char *to, const int retval)
57542 +{
57543 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57544 + if (grsec_enable_mount && (retval >= 0))
57545 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57546 +#endif
57547 + return;
57548 +}
57549 +
57550 +int
57551 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57552 +{
57553 +#ifdef CONFIG_GRKERNSEC_ROFS
57554 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57555 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57556 + return -EPERM;
57557 + } else
57558 + return 0;
57559 +#endif
57560 + return 0;
57561 +}
57562 +
57563 +int
57564 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57565 +{
57566 +#ifdef CONFIG_GRKERNSEC_ROFS
57567 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57568 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57569 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57570 + return -EPERM;
57571 + } else
57572 + return 0;
57573 +#endif
57574 + return 0;
57575 +}
57576 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
57577 new file mode 100644
57578 index 0000000..a3b12a0
57579 --- /dev/null
57580 +++ b/grsecurity/grsec_pax.c
57581 @@ -0,0 +1,36 @@
57582 +#include <linux/kernel.h>
57583 +#include <linux/sched.h>
57584 +#include <linux/mm.h>
57585 +#include <linux/file.h>
57586 +#include <linux/grinternal.h>
57587 +#include <linux/grsecurity.h>
57588 +
57589 +void
57590 +gr_log_textrel(struct vm_area_struct * vma)
57591 +{
57592 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57593 + if (grsec_enable_audit_textrel)
57594 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
57595 +#endif
57596 + return;
57597 +}
57598 +
57599 +void
57600 +gr_log_rwxmmap(struct file *file)
57601 +{
57602 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57603 + if (grsec_enable_log_rwxmaps)
57604 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
57605 +#endif
57606 + return;
57607 +}
57608 +
57609 +void
57610 +gr_log_rwxmprotect(struct file *file)
57611 +{
57612 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57613 + if (grsec_enable_log_rwxmaps)
57614 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
57615 +#endif
57616 + return;
57617 +}
57618 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
57619 new file mode 100644
57620 index 0000000..f7f29aa
57621 --- /dev/null
57622 +++ b/grsecurity/grsec_ptrace.c
57623 @@ -0,0 +1,30 @@
57624 +#include <linux/kernel.h>
57625 +#include <linux/sched.h>
57626 +#include <linux/grinternal.h>
57627 +#include <linux/security.h>
57628 +
57629 +void
57630 +gr_audit_ptrace(struct task_struct *task)
57631 +{
57632 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57633 + if (grsec_enable_audit_ptrace)
57634 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
57635 +#endif
57636 + return;
57637 +}
57638 +
57639 +int
57640 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
57641 +{
57642 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57643 + const struct dentry *dentry = file->f_path.dentry;
57644 + const struct vfsmount *mnt = file->f_path.mnt;
57645 +
57646 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
57647 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
57648 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
57649 + return -EACCES;
57650 + }
57651 +#endif
57652 + return 0;
57653 +}
57654 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
57655 new file mode 100644
57656 index 0000000..7a5b2de
57657 --- /dev/null
57658 +++ b/grsecurity/grsec_sig.c
57659 @@ -0,0 +1,207 @@
57660 +#include <linux/kernel.h>
57661 +#include <linux/sched.h>
57662 +#include <linux/delay.h>
57663 +#include <linux/grsecurity.h>
57664 +#include <linux/grinternal.h>
57665 +#include <linux/hardirq.h>
57666 +
57667 +char *signames[] = {
57668 + [SIGSEGV] = "Segmentation fault",
57669 + [SIGILL] = "Illegal instruction",
57670 + [SIGABRT] = "Abort",
57671 + [SIGBUS] = "Invalid alignment/Bus error"
57672 +};
57673 +
57674 +void
57675 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
57676 +{
57677 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57678 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
57679 + (sig == SIGABRT) || (sig == SIGBUS))) {
57680 + if (t->pid == current->pid) {
57681 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
57682 + } else {
57683 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
57684 + }
57685 + }
57686 +#endif
57687 + return;
57688 +}
57689 +
57690 +int
57691 +gr_handle_signal(const struct task_struct *p, const int sig)
57692 +{
57693 +#ifdef CONFIG_GRKERNSEC
57694 + /* ignore the 0 signal for protected task checks */
57695 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
57696 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
57697 + return -EPERM;
57698 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
57699 + return -EPERM;
57700 + }
57701 +#endif
57702 + return 0;
57703 +}
57704 +
57705 +#ifdef CONFIG_GRKERNSEC
57706 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
57707 +
57708 +int gr_fake_force_sig(int sig, struct task_struct *t)
57709 +{
57710 + unsigned long int flags;
57711 + int ret, blocked, ignored;
57712 + struct k_sigaction *action;
57713 +
57714 + spin_lock_irqsave(&t->sighand->siglock, flags);
57715 + action = &t->sighand->action[sig-1];
57716 + ignored = action->sa.sa_handler == SIG_IGN;
57717 + blocked = sigismember(&t->blocked, sig);
57718 + if (blocked || ignored) {
57719 + action->sa.sa_handler = SIG_DFL;
57720 + if (blocked) {
57721 + sigdelset(&t->blocked, sig);
57722 + recalc_sigpending_and_wake(t);
57723 + }
57724 + }
57725 + if (action->sa.sa_handler == SIG_DFL)
57726 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
57727 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
57728 +
57729 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
57730 +
57731 + return ret;
57732 +}
57733 +#endif
57734 +
57735 +#ifdef CONFIG_GRKERNSEC_BRUTE
57736 +#define GR_USER_BAN_TIME (15 * 60)
57737 +
57738 +static int __get_dumpable(unsigned long mm_flags)
57739 +{
57740 + int ret;
57741 +
57742 + ret = mm_flags & MMF_DUMPABLE_MASK;
57743 + return (ret >= 2) ? 2 : ret;
57744 +}
57745 +#endif
57746 +
57747 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
57748 +{
57749 +#ifdef CONFIG_GRKERNSEC_BRUTE
57750 + uid_t uid = 0;
57751 +
57752 + if (!grsec_enable_brute)
57753 + return;
57754 +
57755 + rcu_read_lock();
57756 + read_lock(&tasklist_lock);
57757 + read_lock(&grsec_exec_file_lock);
57758 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
57759 + p->real_parent->brute = 1;
57760 + else {
57761 + const struct cred *cred = __task_cred(p), *cred2;
57762 + struct task_struct *tsk, *tsk2;
57763 +
57764 + if (!__get_dumpable(mm_flags) && cred->uid) {
57765 + struct user_struct *user;
57766 +
57767 + uid = cred->uid;
57768 +
57769 + /* this is put upon execution past expiration */
57770 + user = find_user(uid);
57771 + if (user == NULL)
57772 + goto unlock;
57773 + user->banned = 1;
57774 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
57775 + if (user->ban_expires == ~0UL)
57776 + user->ban_expires--;
57777 +
57778 + do_each_thread(tsk2, tsk) {
57779 + cred2 = __task_cred(tsk);
57780 + if (tsk != p && cred2->uid == uid)
57781 + gr_fake_force_sig(SIGKILL, tsk);
57782 + } while_each_thread(tsk2, tsk);
57783 + }
57784 + }
57785 +unlock:
57786 + read_unlock(&grsec_exec_file_lock);
57787 + read_unlock(&tasklist_lock);
57788 + rcu_read_unlock();
57789 +
57790 + if (uid)
57791 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
57792 +
57793 +#endif
57794 + return;
57795 +}
57796 +
57797 +void gr_handle_brute_check(void)
57798 +{
57799 +#ifdef CONFIG_GRKERNSEC_BRUTE
57800 + if (current->brute)
57801 + msleep(30 * 1000);
57802 +#endif
57803 + return;
57804 +}
57805 +
57806 +void gr_handle_kernel_exploit(void)
57807 +{
57808 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
57809 + const struct cred *cred;
57810 + struct task_struct *tsk, *tsk2;
57811 + struct user_struct *user;
57812 + uid_t uid;
57813 +
57814 + if (in_irq() || in_serving_softirq() || in_nmi())
57815 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
57816 +
57817 + uid = current_uid();
57818 +
57819 + if (uid == 0)
57820 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
57821 + else {
57822 + /* kill all the processes of this user, hold a reference
57823 + to their creds struct, and prevent them from creating
57824 + another process until system reset
57825 + */
57826 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
57827 + /* we intentionally leak this ref */
57828 + user = get_uid(current->cred->user);
57829 + if (user) {
57830 + user->banned = 1;
57831 + user->ban_expires = ~0UL;
57832 + }
57833 +
57834 + read_lock(&tasklist_lock);
57835 + do_each_thread(tsk2, tsk) {
57836 + cred = __task_cred(tsk);
57837 + if (cred->uid == uid)
57838 + gr_fake_force_sig(SIGKILL, tsk);
57839 + } while_each_thread(tsk2, tsk);
57840 + read_unlock(&tasklist_lock);
57841 + }
57842 +#endif
57843 +}
57844 +
57845 +int __gr_process_user_ban(struct user_struct *user)
57846 +{
57847 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57848 + if (unlikely(user->banned)) {
57849 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
57850 + user->banned = 0;
57851 + user->ban_expires = 0;
57852 + free_uid(user);
57853 + } else
57854 + return -EPERM;
57855 + }
57856 +#endif
57857 + return 0;
57858 +}
57859 +
57860 +int gr_process_user_ban(void)
57861 +{
57862 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57863 + return __gr_process_user_ban(current->cred->user);
57864 +#endif
57865 + return 0;
57866 +}
57867 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
57868 new file mode 100644
57869 index 0000000..4030d57
57870 --- /dev/null
57871 +++ b/grsecurity/grsec_sock.c
57872 @@ -0,0 +1,244 @@
57873 +#include <linux/kernel.h>
57874 +#include <linux/module.h>
57875 +#include <linux/sched.h>
57876 +#include <linux/file.h>
57877 +#include <linux/net.h>
57878 +#include <linux/in.h>
57879 +#include <linux/ip.h>
57880 +#include <net/sock.h>
57881 +#include <net/inet_sock.h>
57882 +#include <linux/grsecurity.h>
57883 +#include <linux/grinternal.h>
57884 +#include <linux/gracl.h>
57885 +
57886 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
57887 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
57888 +
57889 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
57890 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
57891 +
57892 +#ifdef CONFIG_UNIX_MODULE
57893 +EXPORT_SYMBOL(gr_acl_handle_unix);
57894 +EXPORT_SYMBOL(gr_acl_handle_mknod);
57895 +EXPORT_SYMBOL(gr_handle_chroot_unix);
57896 +EXPORT_SYMBOL(gr_handle_create);
57897 +#endif
57898 +
57899 +#ifdef CONFIG_GRKERNSEC
57900 +#define gr_conn_table_size 32749
57901 +struct conn_table_entry {
57902 + struct conn_table_entry *next;
57903 + struct signal_struct *sig;
57904 +};
57905 +
57906 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
57907 +DEFINE_SPINLOCK(gr_conn_table_lock);
57908 +
57909 +extern const char * gr_socktype_to_name(unsigned char type);
57910 +extern const char * gr_proto_to_name(unsigned char proto);
57911 +extern const char * gr_sockfamily_to_name(unsigned char family);
57912 +
57913 +static __inline__ int
57914 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
57915 +{
57916 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
57917 +}
57918 +
57919 +static __inline__ int
57920 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
57921 + __u16 sport, __u16 dport)
57922 +{
57923 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
57924 + sig->gr_sport == sport && sig->gr_dport == dport))
57925 + return 1;
57926 + else
57927 + return 0;
57928 +}
57929 +
57930 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
57931 +{
57932 + struct conn_table_entry **match;
57933 + unsigned int index;
57934 +
57935 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57936 + sig->gr_sport, sig->gr_dport,
57937 + gr_conn_table_size);
57938 +
57939 + newent->sig = sig;
57940 +
57941 + match = &gr_conn_table[index];
57942 + newent->next = *match;
57943 + *match = newent;
57944 +
57945 + return;
57946 +}
57947 +
57948 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
57949 +{
57950 + struct conn_table_entry *match, *last = NULL;
57951 + unsigned int index;
57952 +
57953 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
57954 + sig->gr_sport, sig->gr_dport,
57955 + gr_conn_table_size);
57956 +
57957 + match = gr_conn_table[index];
57958 + while (match && !conn_match(match->sig,
57959 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
57960 + sig->gr_dport)) {
57961 + last = match;
57962 + match = match->next;
57963 + }
57964 +
57965 + if (match) {
57966 + if (last)
57967 + last->next = match->next;
57968 + else
57969 + gr_conn_table[index] = NULL;
57970 + kfree(match);
57971 + }
57972 +
57973 + return;
57974 +}
57975 +
57976 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
57977 + __u16 sport, __u16 dport)
57978 +{
57979 + struct conn_table_entry *match;
57980 + unsigned int index;
57981 +
57982 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
57983 +
57984 + match = gr_conn_table[index];
57985 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
57986 + match = match->next;
57987 +
57988 + if (match)
57989 + return match->sig;
57990 + else
57991 + return NULL;
57992 +}
57993 +
57994 +#endif
57995 +
57996 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
57997 +{
57998 +#ifdef CONFIG_GRKERNSEC
57999 + struct signal_struct *sig = task->signal;
58000 + struct conn_table_entry *newent;
58001 +
58002 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58003 + if (newent == NULL)
58004 + return;
58005 + /* no bh lock needed since we are called with bh disabled */
58006 + spin_lock(&gr_conn_table_lock);
58007 + gr_del_task_from_ip_table_nolock(sig);
58008 + sig->gr_saddr = inet->inet_rcv_saddr;
58009 + sig->gr_daddr = inet->inet_daddr;
58010 + sig->gr_sport = inet->inet_sport;
58011 + sig->gr_dport = inet->inet_dport;
58012 + gr_add_to_task_ip_table_nolock(sig, newent);
58013 + spin_unlock(&gr_conn_table_lock);
58014 +#endif
58015 + return;
58016 +}
58017 +
58018 +void gr_del_task_from_ip_table(struct task_struct *task)
58019 +{
58020 +#ifdef CONFIG_GRKERNSEC
58021 + spin_lock_bh(&gr_conn_table_lock);
58022 + gr_del_task_from_ip_table_nolock(task->signal);
58023 + spin_unlock_bh(&gr_conn_table_lock);
58024 +#endif
58025 + return;
58026 +}
58027 +
58028 +void
58029 +gr_attach_curr_ip(const struct sock *sk)
58030 +{
58031 +#ifdef CONFIG_GRKERNSEC
58032 + struct signal_struct *p, *set;
58033 + const struct inet_sock *inet = inet_sk(sk);
58034 +
58035 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58036 + return;
58037 +
58038 + set = current->signal;
58039 +
58040 + spin_lock_bh(&gr_conn_table_lock);
58041 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58042 + inet->inet_dport, inet->inet_sport);
58043 + if (unlikely(p != NULL)) {
58044 + set->curr_ip = p->curr_ip;
58045 + set->used_accept = 1;
58046 + gr_del_task_from_ip_table_nolock(p);
58047 + spin_unlock_bh(&gr_conn_table_lock);
58048 + return;
58049 + }
58050 + spin_unlock_bh(&gr_conn_table_lock);
58051 +
58052 + set->curr_ip = inet->inet_daddr;
58053 + set->used_accept = 1;
58054 +#endif
58055 + return;
58056 +}
58057 +
58058 +int
58059 +gr_handle_sock_all(const int family, const int type, const int protocol)
58060 +{
58061 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58062 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58063 + (family != AF_UNIX)) {
58064 + if (family == AF_INET)
58065 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58066 + else
58067 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58068 + return -EACCES;
58069 + }
58070 +#endif
58071 + return 0;
58072 +}
58073 +
58074 +int
58075 +gr_handle_sock_server(const struct sockaddr *sck)
58076 +{
58077 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58078 + if (grsec_enable_socket_server &&
58079 + in_group_p(grsec_socket_server_gid) &&
58080 + sck && (sck->sa_family != AF_UNIX) &&
58081 + (sck->sa_family != AF_LOCAL)) {
58082 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58083 + return -EACCES;
58084 + }
58085 +#endif
58086 + return 0;
58087 +}
58088 +
58089 +int
58090 +gr_handle_sock_server_other(const struct sock *sck)
58091 +{
58092 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58093 + if (grsec_enable_socket_server &&
58094 + in_group_p(grsec_socket_server_gid) &&
58095 + sck && (sck->sk_family != AF_UNIX) &&
58096 + (sck->sk_family != AF_LOCAL)) {
58097 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58098 + return -EACCES;
58099 + }
58100 +#endif
58101 + return 0;
58102 +}
58103 +
58104 +int
58105 +gr_handle_sock_client(const struct sockaddr *sck)
58106 +{
58107 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58108 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58109 + sck && (sck->sa_family != AF_UNIX) &&
58110 + (sck->sa_family != AF_LOCAL)) {
58111 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58112 + return -EACCES;
58113 + }
58114 +#endif
58115 + return 0;
58116 +}
58117 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58118 new file mode 100644
58119 index 0000000..a1aedd7
58120 --- /dev/null
58121 +++ b/grsecurity/grsec_sysctl.c
58122 @@ -0,0 +1,451 @@
58123 +#include <linux/kernel.h>
58124 +#include <linux/sched.h>
58125 +#include <linux/sysctl.h>
58126 +#include <linux/grsecurity.h>
58127 +#include <linux/grinternal.h>
58128 +
58129 +int
58130 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58131 +{
58132 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58133 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58134 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58135 + return -EACCES;
58136 + }
58137 +#endif
58138 + return 0;
58139 +}
58140 +
58141 +#ifdef CONFIG_GRKERNSEC_ROFS
58142 +static int __maybe_unused one = 1;
58143 +#endif
58144 +
58145 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58146 +struct ctl_table grsecurity_table[] = {
58147 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58148 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58149 +#ifdef CONFIG_GRKERNSEC_IO
58150 + {
58151 + .procname = "disable_priv_io",
58152 + .data = &grsec_disable_privio,
58153 + .maxlen = sizeof(int),
58154 + .mode = 0600,
58155 + .proc_handler = &proc_dointvec,
58156 + },
58157 +#endif
58158 +#endif
58159 +#ifdef CONFIG_GRKERNSEC_LINK
58160 + {
58161 + .procname = "linking_restrictions",
58162 + .data = &grsec_enable_link,
58163 + .maxlen = sizeof(int),
58164 + .mode = 0600,
58165 + .proc_handler = &proc_dointvec,
58166 + },
58167 +#endif
58168 +#ifdef CONFIG_GRKERNSEC_BRUTE
58169 + {
58170 + .procname = "deter_bruteforce",
58171 + .data = &grsec_enable_brute,
58172 + .maxlen = sizeof(int),
58173 + .mode = 0600,
58174 + .proc_handler = &proc_dointvec,
58175 + },
58176 +#endif
58177 +#ifdef CONFIG_GRKERNSEC_FIFO
58178 + {
58179 + .procname = "fifo_restrictions",
58180 + .data = &grsec_enable_fifo,
58181 + .maxlen = sizeof(int),
58182 + .mode = 0600,
58183 + .proc_handler = &proc_dointvec,
58184 + },
58185 +#endif
58186 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58187 + {
58188 + .procname = "ptrace_readexec",
58189 + .data = &grsec_enable_ptrace_readexec,
58190 + .maxlen = sizeof(int),
58191 + .mode = 0600,
58192 + .proc_handler = &proc_dointvec,
58193 + },
58194 +#endif
58195 +#ifdef CONFIG_GRKERNSEC_SETXID
58196 + {
58197 + .procname = "consistent_setxid",
58198 + .data = &grsec_enable_setxid,
58199 + .maxlen = sizeof(int),
58200 + .mode = 0600,
58201 + .proc_handler = &proc_dointvec,
58202 + },
58203 +#endif
58204 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58205 + {
58206 + .procname = "ip_blackhole",
58207 + .data = &grsec_enable_blackhole,
58208 + .maxlen = sizeof(int),
58209 + .mode = 0600,
58210 + .proc_handler = &proc_dointvec,
58211 + },
58212 + {
58213 + .procname = "lastack_retries",
58214 + .data = &grsec_lastack_retries,
58215 + .maxlen = sizeof(int),
58216 + .mode = 0600,
58217 + .proc_handler = &proc_dointvec,
58218 + },
58219 +#endif
58220 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58221 + {
58222 + .procname = "exec_logging",
58223 + .data = &grsec_enable_execlog,
58224 + .maxlen = sizeof(int),
58225 + .mode = 0600,
58226 + .proc_handler = &proc_dointvec,
58227 + },
58228 +#endif
58229 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58230 + {
58231 + .procname = "rwxmap_logging",
58232 + .data = &grsec_enable_log_rwxmaps,
58233 + .maxlen = sizeof(int),
58234 + .mode = 0600,
58235 + .proc_handler = &proc_dointvec,
58236 + },
58237 +#endif
58238 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58239 + {
58240 + .procname = "signal_logging",
58241 + .data = &grsec_enable_signal,
58242 + .maxlen = sizeof(int),
58243 + .mode = 0600,
58244 + .proc_handler = &proc_dointvec,
58245 + },
58246 +#endif
58247 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58248 + {
58249 + .procname = "forkfail_logging",
58250 + .data = &grsec_enable_forkfail,
58251 + .maxlen = sizeof(int),
58252 + .mode = 0600,
58253 + .proc_handler = &proc_dointvec,
58254 + },
58255 +#endif
58256 +#ifdef CONFIG_GRKERNSEC_TIME
58257 + {
58258 + .procname = "timechange_logging",
58259 + .data = &grsec_enable_time,
58260 + .maxlen = sizeof(int),
58261 + .mode = 0600,
58262 + .proc_handler = &proc_dointvec,
58263 + },
58264 +#endif
58265 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58266 + {
58267 + .procname = "chroot_deny_shmat",
58268 + .data = &grsec_enable_chroot_shmat,
58269 + .maxlen = sizeof(int),
58270 + .mode = 0600,
58271 + .proc_handler = &proc_dointvec,
58272 + },
58273 +#endif
58274 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58275 + {
58276 + .procname = "chroot_deny_unix",
58277 + .data = &grsec_enable_chroot_unix,
58278 + .maxlen = sizeof(int),
58279 + .mode = 0600,
58280 + .proc_handler = &proc_dointvec,
58281 + },
58282 +#endif
58283 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58284 + {
58285 + .procname = "chroot_deny_mount",
58286 + .data = &grsec_enable_chroot_mount,
58287 + .maxlen = sizeof(int),
58288 + .mode = 0600,
58289 + .proc_handler = &proc_dointvec,
58290 + },
58291 +#endif
58292 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58293 + {
58294 + .procname = "chroot_deny_fchdir",
58295 + .data = &grsec_enable_chroot_fchdir,
58296 + .maxlen = sizeof(int),
58297 + .mode = 0600,
58298 + .proc_handler = &proc_dointvec,
58299 + },
58300 +#endif
58301 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58302 + {
58303 + .procname = "chroot_deny_chroot",
58304 + .data = &grsec_enable_chroot_double,
58305 + .maxlen = sizeof(int),
58306 + .mode = 0600,
58307 + .proc_handler = &proc_dointvec,
58308 + },
58309 +#endif
58310 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58311 + {
58312 + .procname = "chroot_deny_pivot",
58313 + .data = &grsec_enable_chroot_pivot,
58314 + .maxlen = sizeof(int),
58315 + .mode = 0600,
58316 + .proc_handler = &proc_dointvec,
58317 + },
58318 +#endif
58319 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58320 + {
58321 + .procname = "chroot_enforce_chdir",
58322 + .data = &grsec_enable_chroot_chdir,
58323 + .maxlen = sizeof(int),
58324 + .mode = 0600,
58325 + .proc_handler = &proc_dointvec,
58326 + },
58327 +#endif
58328 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58329 + {
58330 + .procname = "chroot_deny_chmod",
58331 + .data = &grsec_enable_chroot_chmod,
58332 + .maxlen = sizeof(int),
58333 + .mode = 0600,
58334 + .proc_handler = &proc_dointvec,
58335 + },
58336 +#endif
58337 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58338 + {
58339 + .procname = "chroot_deny_mknod",
58340 + .data = &grsec_enable_chroot_mknod,
58341 + .maxlen = sizeof(int),
58342 + .mode = 0600,
58343 + .proc_handler = &proc_dointvec,
58344 + },
58345 +#endif
58346 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58347 + {
58348 + .procname = "chroot_restrict_nice",
58349 + .data = &grsec_enable_chroot_nice,
58350 + .maxlen = sizeof(int),
58351 + .mode = 0600,
58352 + .proc_handler = &proc_dointvec,
58353 + },
58354 +#endif
58355 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58356 + {
58357 + .procname = "chroot_execlog",
58358 + .data = &grsec_enable_chroot_execlog,
58359 + .maxlen = sizeof(int),
58360 + .mode = 0600,
58361 + .proc_handler = &proc_dointvec,
58362 + },
58363 +#endif
58364 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58365 + {
58366 + .procname = "chroot_caps",
58367 + .data = &grsec_enable_chroot_caps,
58368 + .maxlen = sizeof(int),
58369 + .mode = 0600,
58370 + .proc_handler = &proc_dointvec,
58371 + },
58372 +#endif
58373 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58374 + {
58375 + .procname = "chroot_deny_sysctl",
58376 + .data = &grsec_enable_chroot_sysctl,
58377 + .maxlen = sizeof(int),
58378 + .mode = 0600,
58379 + .proc_handler = &proc_dointvec,
58380 + },
58381 +#endif
58382 +#ifdef CONFIG_GRKERNSEC_TPE
58383 + {
58384 + .procname = "tpe",
58385 + .data = &grsec_enable_tpe,
58386 + .maxlen = sizeof(int),
58387 + .mode = 0600,
58388 + .proc_handler = &proc_dointvec,
58389 + },
58390 + {
58391 + .procname = "tpe_gid",
58392 + .data = &grsec_tpe_gid,
58393 + .maxlen = sizeof(int),
58394 + .mode = 0600,
58395 + .proc_handler = &proc_dointvec,
58396 + },
58397 +#endif
58398 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58399 + {
58400 + .procname = "tpe_invert",
58401 + .data = &grsec_enable_tpe_invert,
58402 + .maxlen = sizeof(int),
58403 + .mode = 0600,
58404 + .proc_handler = &proc_dointvec,
58405 + },
58406 +#endif
58407 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58408 + {
58409 + .procname = "tpe_restrict_all",
58410 + .data = &grsec_enable_tpe_all,
58411 + .maxlen = sizeof(int),
58412 + .mode = 0600,
58413 + .proc_handler = &proc_dointvec,
58414 + },
58415 +#endif
58416 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58417 + {
58418 + .procname = "socket_all",
58419 + .data = &grsec_enable_socket_all,
58420 + .maxlen = sizeof(int),
58421 + .mode = 0600,
58422 + .proc_handler = &proc_dointvec,
58423 + },
58424 + {
58425 + .procname = "socket_all_gid",
58426 + .data = &grsec_socket_all_gid,
58427 + .maxlen = sizeof(int),
58428 + .mode = 0600,
58429 + .proc_handler = &proc_dointvec,
58430 + },
58431 +#endif
58432 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58433 + {
58434 + .procname = "socket_client",
58435 + .data = &grsec_enable_socket_client,
58436 + .maxlen = sizeof(int),
58437 + .mode = 0600,
58438 + .proc_handler = &proc_dointvec,
58439 + },
58440 + {
58441 + .procname = "socket_client_gid",
58442 + .data = &grsec_socket_client_gid,
58443 + .maxlen = sizeof(int),
58444 + .mode = 0600,
58445 + .proc_handler = &proc_dointvec,
58446 + },
58447 +#endif
58448 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58449 + {
58450 + .procname = "socket_server",
58451 + .data = &grsec_enable_socket_server,
58452 + .maxlen = sizeof(int),
58453 + .mode = 0600,
58454 + .proc_handler = &proc_dointvec,
58455 + },
58456 + {
58457 + .procname = "socket_server_gid",
58458 + .data = &grsec_socket_server_gid,
58459 + .maxlen = sizeof(int),
58460 + .mode = 0600,
58461 + .proc_handler = &proc_dointvec,
58462 + },
58463 +#endif
58464 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58465 + {
58466 + .procname = "audit_group",
58467 + .data = &grsec_enable_group,
58468 + .maxlen = sizeof(int),
58469 + .mode = 0600,
58470 + .proc_handler = &proc_dointvec,
58471 + },
58472 + {
58473 + .procname = "audit_gid",
58474 + .data = &grsec_audit_gid,
58475 + .maxlen = sizeof(int),
58476 + .mode = 0600,
58477 + .proc_handler = &proc_dointvec,
58478 + },
58479 +#endif
58480 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58481 + {
58482 + .procname = "audit_chdir",
58483 + .data = &grsec_enable_chdir,
58484 + .maxlen = sizeof(int),
58485 + .mode = 0600,
58486 + .proc_handler = &proc_dointvec,
58487 + },
58488 +#endif
58489 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58490 + {
58491 + .procname = "audit_mount",
58492 + .data = &grsec_enable_mount,
58493 + .maxlen = sizeof(int),
58494 + .mode = 0600,
58495 + .proc_handler = &proc_dointvec,
58496 + },
58497 +#endif
58498 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58499 + {
58500 + .procname = "audit_textrel",
58501 + .data = &grsec_enable_audit_textrel,
58502 + .maxlen = sizeof(int),
58503 + .mode = 0600,
58504 + .proc_handler = &proc_dointvec,
58505 + },
58506 +#endif
58507 +#ifdef CONFIG_GRKERNSEC_DMESG
58508 + {
58509 + .procname = "dmesg",
58510 + .data = &grsec_enable_dmesg,
58511 + .maxlen = sizeof(int),
58512 + .mode = 0600,
58513 + .proc_handler = &proc_dointvec,
58514 + },
58515 +#endif
58516 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58517 + {
58518 + .procname = "chroot_findtask",
58519 + .data = &grsec_enable_chroot_findtask,
58520 + .maxlen = sizeof(int),
58521 + .mode = 0600,
58522 + .proc_handler = &proc_dointvec,
58523 + },
58524 +#endif
58525 +#ifdef CONFIG_GRKERNSEC_RESLOG
58526 + {
58527 + .procname = "resource_logging",
58528 + .data = &grsec_resource_logging,
58529 + .maxlen = sizeof(int),
58530 + .mode = 0600,
58531 + .proc_handler = &proc_dointvec,
58532 + },
58533 +#endif
58534 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58535 + {
58536 + .procname = "audit_ptrace",
58537 + .data = &grsec_enable_audit_ptrace,
58538 + .maxlen = sizeof(int),
58539 + .mode = 0600,
58540 + .proc_handler = &proc_dointvec,
58541 + },
58542 +#endif
58543 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58544 + {
58545 + .procname = "harden_ptrace",
58546 + .data = &grsec_enable_harden_ptrace,
58547 + .maxlen = sizeof(int),
58548 + .mode = 0600,
58549 + .proc_handler = &proc_dointvec,
58550 + },
58551 +#endif
58552 + {
58553 + .procname = "grsec_lock",
58554 + .data = &grsec_lock,
58555 + .maxlen = sizeof(int),
58556 + .mode = 0600,
58557 + .proc_handler = &proc_dointvec,
58558 + },
58559 +#endif
58560 +#ifdef CONFIG_GRKERNSEC_ROFS
58561 + {
58562 + .procname = "romount_protect",
58563 + .data = &grsec_enable_rofs,
58564 + .maxlen = sizeof(int),
58565 + .mode = 0600,
58566 + .proc_handler = &proc_dointvec_minmax,
58567 + .extra1 = &one,
58568 + .extra2 = &one,
58569 + },
58570 +#endif
58571 + { }
58572 +};
58573 +#endif
58574 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
58575 new file mode 100644
58576 index 0000000..0dc13c3
58577 --- /dev/null
58578 +++ b/grsecurity/grsec_time.c
58579 @@ -0,0 +1,16 @@
58580 +#include <linux/kernel.h>
58581 +#include <linux/sched.h>
58582 +#include <linux/grinternal.h>
58583 +#include <linux/module.h>
58584 +
58585 +void
58586 +gr_log_timechange(void)
58587 +{
58588 +#ifdef CONFIG_GRKERNSEC_TIME
58589 + if (grsec_enable_time)
58590 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
58591 +#endif
58592 + return;
58593 +}
58594 +
58595 +EXPORT_SYMBOL(gr_log_timechange);
58596 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
58597 new file mode 100644
58598 index 0000000..07e0dc0
58599 --- /dev/null
58600 +++ b/grsecurity/grsec_tpe.c
58601 @@ -0,0 +1,73 @@
58602 +#include <linux/kernel.h>
58603 +#include <linux/sched.h>
58604 +#include <linux/file.h>
58605 +#include <linux/fs.h>
58606 +#include <linux/grinternal.h>
58607 +
58608 +extern int gr_acl_tpe_check(void);
58609 +
58610 +int
58611 +gr_tpe_allow(const struct file *file)
58612 +{
58613 +#ifdef CONFIG_GRKERNSEC
58614 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
58615 + const struct cred *cred = current_cred();
58616 + char *msg = NULL;
58617 + char *msg2 = NULL;
58618 +
58619 + // never restrict root
58620 + if (!cred->uid)
58621 + return 1;
58622 +
58623 + if (grsec_enable_tpe) {
58624 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58625 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
58626 + msg = "not being in trusted group";
58627 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
58628 + msg = "being in untrusted group";
58629 +#else
58630 + if (in_group_p(grsec_tpe_gid))
58631 + msg = "being in untrusted group";
58632 +#endif
58633 + }
58634 + if (!msg && gr_acl_tpe_check())
58635 + msg = "being in untrusted role";
58636 +
58637 + // not in any affected group/role
58638 + if (!msg)
58639 + goto next_check;
58640 +
58641 + if (inode->i_uid)
58642 + msg2 = "file in non-root-owned directory";
58643 + else if (inode->i_mode & S_IWOTH)
58644 + msg2 = "file in world-writable directory";
58645 + else if (inode->i_mode & S_IWGRP)
58646 + msg2 = "file in group-writable directory";
58647 +
58648 + if (msg && msg2) {
58649 + char fullmsg[70] = {0};
58650 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
58651 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
58652 + return 0;
58653 + }
58654 + msg = NULL;
58655 +next_check:
58656 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58657 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
58658 + return 1;
58659 +
58660 + if (inode->i_uid && (inode->i_uid != cred->uid))
58661 + msg = "directory not owned by user";
58662 + else if (inode->i_mode & S_IWOTH)
58663 + msg = "file in world-writable directory";
58664 + else if (inode->i_mode & S_IWGRP)
58665 + msg = "file in group-writable directory";
58666 +
58667 + if (msg) {
58668 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
58669 + return 0;
58670 + }
58671 +#endif
58672 +#endif
58673 + return 1;
58674 +}
58675 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
58676 new file mode 100644
58677 index 0000000..9f7b1ac
58678 --- /dev/null
58679 +++ b/grsecurity/grsum.c
58680 @@ -0,0 +1,61 @@
58681 +#include <linux/err.h>
58682 +#include <linux/kernel.h>
58683 +#include <linux/sched.h>
58684 +#include <linux/mm.h>
58685 +#include <linux/scatterlist.h>
58686 +#include <linux/crypto.h>
58687 +#include <linux/gracl.h>
58688 +
58689 +
58690 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
58691 +#error "crypto and sha256 must be built into the kernel"
58692 +#endif
58693 +
58694 +int
58695 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
58696 +{
58697 + char *p;
58698 + struct crypto_hash *tfm;
58699 + struct hash_desc desc;
58700 + struct scatterlist sg;
58701 + unsigned char temp_sum[GR_SHA_LEN];
58702 + volatile int retval = 0;
58703 + volatile int dummy = 0;
58704 + unsigned int i;
58705 +
58706 + sg_init_table(&sg, 1);
58707 +
58708 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
58709 + if (IS_ERR(tfm)) {
58710 + /* should never happen, since sha256 should be built in */
58711 + return 1;
58712 + }
58713 +
58714 + desc.tfm = tfm;
58715 + desc.flags = 0;
58716 +
58717 + crypto_hash_init(&desc);
58718 +
58719 + p = salt;
58720 + sg_set_buf(&sg, p, GR_SALT_LEN);
58721 + crypto_hash_update(&desc, &sg, sg.length);
58722 +
58723 + p = entry->pw;
58724 + sg_set_buf(&sg, p, strlen(p));
58725 +
58726 + crypto_hash_update(&desc, &sg, sg.length);
58727 +
58728 + crypto_hash_final(&desc, temp_sum);
58729 +
58730 + memset(entry->pw, 0, GR_PW_LEN);
58731 +
58732 + for (i = 0; i < GR_SHA_LEN; i++)
58733 + if (sum[i] != temp_sum[i])
58734 + retval = 1;
58735 + else
58736 + dummy = 1; // waste a cycle
58737 +
58738 + crypto_free_hash(tfm);
58739 +
58740 + return retval;
58741 +}
58742 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
58743 index 6cd5b64..f620d2d 100644
58744 --- a/include/acpi/acpi_bus.h
58745 +++ b/include/acpi/acpi_bus.h
58746 @@ -107,7 +107,7 @@ struct acpi_device_ops {
58747 acpi_op_bind bind;
58748 acpi_op_unbind unbind;
58749 acpi_op_notify notify;
58750 -};
58751 +} __no_const;
58752
58753 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
58754
58755 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
58756 index b7babf0..71e4e74 100644
58757 --- a/include/asm-generic/atomic-long.h
58758 +++ b/include/asm-generic/atomic-long.h
58759 @@ -22,6 +22,12 @@
58760
58761 typedef atomic64_t atomic_long_t;
58762
58763 +#ifdef CONFIG_PAX_REFCOUNT
58764 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
58765 +#else
58766 +typedef atomic64_t atomic_long_unchecked_t;
58767 +#endif
58768 +
58769 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58770
58771 static inline long atomic_long_read(atomic_long_t *l)
58772 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58773 return (long)atomic64_read(v);
58774 }
58775
58776 +#ifdef CONFIG_PAX_REFCOUNT
58777 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58778 +{
58779 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58780 +
58781 + return (long)atomic64_read_unchecked(v);
58782 +}
58783 +#endif
58784 +
58785 static inline void atomic_long_set(atomic_long_t *l, long i)
58786 {
58787 atomic64_t *v = (atomic64_t *)l;
58788 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58789 atomic64_set(v, i);
58790 }
58791
58792 +#ifdef CONFIG_PAX_REFCOUNT
58793 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58794 +{
58795 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58796 +
58797 + atomic64_set_unchecked(v, i);
58798 +}
58799 +#endif
58800 +
58801 static inline void atomic_long_inc(atomic_long_t *l)
58802 {
58803 atomic64_t *v = (atomic64_t *)l;
58804 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58805 atomic64_inc(v);
58806 }
58807
58808 +#ifdef CONFIG_PAX_REFCOUNT
58809 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58810 +{
58811 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58812 +
58813 + atomic64_inc_unchecked(v);
58814 +}
58815 +#endif
58816 +
58817 static inline void atomic_long_dec(atomic_long_t *l)
58818 {
58819 atomic64_t *v = (atomic64_t *)l;
58820 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58821 atomic64_dec(v);
58822 }
58823
58824 +#ifdef CONFIG_PAX_REFCOUNT
58825 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58826 +{
58827 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58828 +
58829 + atomic64_dec_unchecked(v);
58830 +}
58831 +#endif
58832 +
58833 static inline void atomic_long_add(long i, atomic_long_t *l)
58834 {
58835 atomic64_t *v = (atomic64_t *)l;
58836 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58837 atomic64_add(i, v);
58838 }
58839
58840 +#ifdef CONFIG_PAX_REFCOUNT
58841 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58842 +{
58843 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58844 +
58845 + atomic64_add_unchecked(i, v);
58846 +}
58847 +#endif
58848 +
58849 static inline void atomic_long_sub(long i, atomic_long_t *l)
58850 {
58851 atomic64_t *v = (atomic64_t *)l;
58852 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58853 atomic64_sub(i, v);
58854 }
58855
58856 +#ifdef CONFIG_PAX_REFCOUNT
58857 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58858 +{
58859 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58860 +
58861 + atomic64_sub_unchecked(i, v);
58862 +}
58863 +#endif
58864 +
58865 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58866 {
58867 atomic64_t *v = (atomic64_t *)l;
58868 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58869 return (long)atomic64_inc_return(v);
58870 }
58871
58872 +#ifdef CONFIG_PAX_REFCOUNT
58873 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58874 +{
58875 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58876 +
58877 + return (long)atomic64_inc_return_unchecked(v);
58878 +}
58879 +#endif
58880 +
58881 static inline long atomic_long_dec_return(atomic_long_t *l)
58882 {
58883 atomic64_t *v = (atomic64_t *)l;
58884 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
58885
58886 typedef atomic_t atomic_long_t;
58887
58888 +#ifdef CONFIG_PAX_REFCOUNT
58889 +typedef atomic_unchecked_t atomic_long_unchecked_t;
58890 +#else
58891 +typedef atomic_t atomic_long_unchecked_t;
58892 +#endif
58893 +
58894 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
58895 static inline long atomic_long_read(atomic_long_t *l)
58896 {
58897 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
58898 return (long)atomic_read(v);
58899 }
58900
58901 +#ifdef CONFIG_PAX_REFCOUNT
58902 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
58903 +{
58904 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58905 +
58906 + return (long)atomic_read_unchecked(v);
58907 +}
58908 +#endif
58909 +
58910 static inline void atomic_long_set(atomic_long_t *l, long i)
58911 {
58912 atomic_t *v = (atomic_t *)l;
58913 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
58914 atomic_set(v, i);
58915 }
58916
58917 +#ifdef CONFIG_PAX_REFCOUNT
58918 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
58919 +{
58920 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58921 +
58922 + atomic_set_unchecked(v, i);
58923 +}
58924 +#endif
58925 +
58926 static inline void atomic_long_inc(atomic_long_t *l)
58927 {
58928 atomic_t *v = (atomic_t *)l;
58929 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
58930 atomic_inc(v);
58931 }
58932
58933 +#ifdef CONFIG_PAX_REFCOUNT
58934 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58935 +{
58936 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58937 +
58938 + atomic_inc_unchecked(v);
58939 +}
58940 +#endif
58941 +
58942 static inline void atomic_long_dec(atomic_long_t *l)
58943 {
58944 atomic_t *v = (atomic_t *)l;
58945 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
58946 atomic_dec(v);
58947 }
58948
58949 +#ifdef CONFIG_PAX_REFCOUNT
58950 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
58951 +{
58952 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58953 +
58954 + atomic_dec_unchecked(v);
58955 +}
58956 +#endif
58957 +
58958 static inline void atomic_long_add(long i, atomic_long_t *l)
58959 {
58960 atomic_t *v = (atomic_t *)l;
58961 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
58962 atomic_add(i, v);
58963 }
58964
58965 +#ifdef CONFIG_PAX_REFCOUNT
58966 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58967 +{
58968 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58969 +
58970 + atomic_add_unchecked(i, v);
58971 +}
58972 +#endif
58973 +
58974 static inline void atomic_long_sub(long i, atomic_long_t *l)
58975 {
58976 atomic_t *v = (atomic_t *)l;
58977 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
58978 atomic_sub(i, v);
58979 }
58980
58981 +#ifdef CONFIG_PAX_REFCOUNT
58982 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
58983 +{
58984 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
58985 +
58986 + atomic_sub_unchecked(i, v);
58987 +}
58988 +#endif
58989 +
58990 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
58991 {
58992 atomic_t *v = (atomic_t *)l;
58993 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
58994 return (long)atomic_inc_return(v);
58995 }
58996
58997 +#ifdef CONFIG_PAX_REFCOUNT
58998 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58999 +{
59000 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59001 +
59002 + return (long)atomic_inc_return_unchecked(v);
59003 +}
59004 +#endif
59005 +
59006 static inline long atomic_long_dec_return(atomic_long_t *l)
59007 {
59008 atomic_t *v = (atomic_t *)l;
59009 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59010
59011 #endif /* BITS_PER_LONG == 64 */
59012
59013 +#ifdef CONFIG_PAX_REFCOUNT
59014 +static inline void pax_refcount_needs_these_functions(void)
59015 +{
59016 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59017 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59018 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59019 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59020 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59021 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59022 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59023 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59024 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59025 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59026 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59027 +
59028 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59029 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59030 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59031 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59032 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59033 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59034 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59035 +}
59036 +#else
59037 +#define atomic_read_unchecked(v) atomic_read(v)
59038 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59039 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59040 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59041 +#define atomic_inc_unchecked(v) atomic_inc(v)
59042 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59043 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59044 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59045 +#define atomic_dec_unchecked(v) atomic_dec(v)
59046 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59047 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59048 +
59049 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59050 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59051 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59052 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59053 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59054 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59055 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59056 +#endif
59057 +
59058 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59059 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59060 index b18ce4f..2ee2843 100644
59061 --- a/include/asm-generic/atomic64.h
59062 +++ b/include/asm-generic/atomic64.h
59063 @@ -16,6 +16,8 @@ typedef struct {
59064 long long counter;
59065 } atomic64_t;
59066
59067 +typedef atomic64_t atomic64_unchecked_t;
59068 +
59069 #define ATOMIC64_INIT(i) { (i) }
59070
59071 extern long long atomic64_read(const atomic64_t *v);
59072 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59073 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59074 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59075
59076 +#define atomic64_read_unchecked(v) atomic64_read(v)
59077 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59078 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59079 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59080 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59081 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59082 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59083 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59084 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59085 +
59086 #endif /* _ASM_GENERIC_ATOMIC64_H */
59087 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59088 index 1bfcfe5..e04c5c9 100644
59089 --- a/include/asm-generic/cache.h
59090 +++ b/include/asm-generic/cache.h
59091 @@ -6,7 +6,7 @@
59092 * cache lines need to provide their own cache.h.
59093 */
59094
59095 -#define L1_CACHE_SHIFT 5
59096 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59097 +#define L1_CACHE_SHIFT 5UL
59098 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59099
59100 #endif /* __ASM_GENERIC_CACHE_H */
59101 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59102 index 0d68a1e..b74a761 100644
59103 --- a/include/asm-generic/emergency-restart.h
59104 +++ b/include/asm-generic/emergency-restart.h
59105 @@ -1,7 +1,7 @@
59106 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59107 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59108
59109 -static inline void machine_emergency_restart(void)
59110 +static inline __noreturn void machine_emergency_restart(void)
59111 {
59112 machine_restart(NULL);
59113 }
59114 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
59115 index 1ca3efc..e3dc852 100644
59116 --- a/include/asm-generic/int-l64.h
59117 +++ b/include/asm-generic/int-l64.h
59118 @@ -46,6 +46,8 @@ typedef unsigned int u32;
59119 typedef signed long s64;
59120 typedef unsigned long u64;
59121
59122 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
59123 +
59124 #define S8_C(x) x
59125 #define U8_C(x) x ## U
59126 #define S16_C(x) x
59127 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
59128 index f394147..b6152b9 100644
59129 --- a/include/asm-generic/int-ll64.h
59130 +++ b/include/asm-generic/int-ll64.h
59131 @@ -51,6 +51,8 @@ typedef unsigned int u32;
59132 typedef signed long long s64;
59133 typedef unsigned long long u64;
59134
59135 +typedef unsigned long long intoverflow_t;
59136 +
59137 #define S8_C(x) x
59138 #define U8_C(x) x ## U
59139 #define S16_C(x) x
59140 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59141 index 0232ccb..13d9165 100644
59142 --- a/include/asm-generic/kmap_types.h
59143 +++ b/include/asm-generic/kmap_types.h
59144 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59145 KMAP_D(17) KM_NMI,
59146 KMAP_D(18) KM_NMI_PTE,
59147 KMAP_D(19) KM_KDB,
59148 +KMAP_D(20) KM_CLEARPAGE,
59149 /*
59150 * Remember to update debug_kmap_atomic() when adding new kmap types!
59151 */
59152 -KMAP_D(20) KM_TYPE_NR
59153 +KMAP_D(21) KM_TYPE_NR
59154 };
59155
59156 #undef KMAP_D
59157 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59158 index 9ceb03b..2efbcbd 100644
59159 --- a/include/asm-generic/local.h
59160 +++ b/include/asm-generic/local.h
59161 @@ -39,6 +39,7 @@ typedef struct
59162 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59163 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59164 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59165 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59166
59167 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59168 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59169 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59170 index 725612b..9cc513a 100644
59171 --- a/include/asm-generic/pgtable-nopmd.h
59172 +++ b/include/asm-generic/pgtable-nopmd.h
59173 @@ -1,14 +1,19 @@
59174 #ifndef _PGTABLE_NOPMD_H
59175 #define _PGTABLE_NOPMD_H
59176
59177 -#ifndef __ASSEMBLY__
59178 -
59179 #include <asm-generic/pgtable-nopud.h>
59180
59181 -struct mm_struct;
59182 -
59183 #define __PAGETABLE_PMD_FOLDED
59184
59185 +#define PMD_SHIFT PUD_SHIFT
59186 +#define PTRS_PER_PMD 1
59187 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59188 +#define PMD_MASK (~(PMD_SIZE-1))
59189 +
59190 +#ifndef __ASSEMBLY__
59191 +
59192 +struct mm_struct;
59193 +
59194 /*
59195 * Having the pmd type consist of a pud gets the size right, and allows
59196 * us to conceptually access the pud entry that this pmd is folded into
59197 @@ -16,11 +21,6 @@ struct mm_struct;
59198 */
59199 typedef struct { pud_t pud; } pmd_t;
59200
59201 -#define PMD_SHIFT PUD_SHIFT
59202 -#define PTRS_PER_PMD 1
59203 -#define PMD_SIZE (1UL << PMD_SHIFT)
59204 -#define PMD_MASK (~(PMD_SIZE-1))
59205 -
59206 /*
59207 * The "pud_xxx()" functions here are trivial for a folded two-level
59208 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59209 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59210 index 810431d..ccc3638 100644
59211 --- a/include/asm-generic/pgtable-nopud.h
59212 +++ b/include/asm-generic/pgtable-nopud.h
59213 @@ -1,10 +1,15 @@
59214 #ifndef _PGTABLE_NOPUD_H
59215 #define _PGTABLE_NOPUD_H
59216
59217 -#ifndef __ASSEMBLY__
59218 -
59219 #define __PAGETABLE_PUD_FOLDED
59220
59221 +#define PUD_SHIFT PGDIR_SHIFT
59222 +#define PTRS_PER_PUD 1
59223 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59224 +#define PUD_MASK (~(PUD_SIZE-1))
59225 +
59226 +#ifndef __ASSEMBLY__
59227 +
59228 /*
59229 * Having the pud type consist of a pgd gets the size right, and allows
59230 * us to conceptually access the pgd entry that this pud is folded into
59231 @@ -12,11 +17,6 @@
59232 */
59233 typedef struct { pgd_t pgd; } pud_t;
59234
59235 -#define PUD_SHIFT PGDIR_SHIFT
59236 -#define PTRS_PER_PUD 1
59237 -#define PUD_SIZE (1UL << PUD_SHIFT)
59238 -#define PUD_MASK (~(PUD_SIZE-1))
59239 -
59240 /*
59241 * The "pgd_xxx()" functions here are trivial for a folded two-level
59242 * setup: the pud is never bad, and a pud always exists (as it's folded
59243 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59244 index 76bff2b..c7a14e2 100644
59245 --- a/include/asm-generic/pgtable.h
59246 +++ b/include/asm-generic/pgtable.h
59247 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
59248 #endif /* __HAVE_ARCH_PMD_WRITE */
59249 #endif
59250
59251 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59252 +static inline unsigned long pax_open_kernel(void) { return 0; }
59253 +#endif
59254 +
59255 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59256 +static inline unsigned long pax_close_kernel(void) { return 0; }
59257 +#endif
59258 +
59259 #endif /* !__ASSEMBLY__ */
59260
59261 #endif /* _ASM_GENERIC_PGTABLE_H */
59262 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59263 index b5e2e4c..6a5373e 100644
59264 --- a/include/asm-generic/vmlinux.lds.h
59265 +++ b/include/asm-generic/vmlinux.lds.h
59266 @@ -217,6 +217,7 @@
59267 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59268 VMLINUX_SYMBOL(__start_rodata) = .; \
59269 *(.rodata) *(.rodata.*) \
59270 + *(.data..read_only) \
59271 *(__vermagic) /* Kernel version magic */ \
59272 . = ALIGN(8); \
59273 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59274 @@ -722,17 +723,18 @@
59275 * section in the linker script will go there too. @phdr should have
59276 * a leading colon.
59277 *
59278 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59279 + * Note that this macros defines per_cpu_load as an absolute symbol.
59280 * If there is no need to put the percpu section at a predetermined
59281 * address, use PERCPU_SECTION.
59282 */
59283 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59284 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59285 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59286 + per_cpu_load = .; \
59287 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59288 - LOAD_OFFSET) { \
59289 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59290 PERCPU_INPUT(cacheline) \
59291 } phdr \
59292 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59293 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59294
59295 /**
59296 * PERCPU_SECTION - define output section for percpu area, simple version
59297 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59298 index bf4b2dc..2d0762f 100644
59299 --- a/include/drm/drmP.h
59300 +++ b/include/drm/drmP.h
59301 @@ -72,6 +72,7 @@
59302 #include <linux/workqueue.h>
59303 #include <linux/poll.h>
59304 #include <asm/pgalloc.h>
59305 +#include <asm/local.h>
59306 #include "drm.h"
59307
59308 #include <linux/idr.h>
59309 @@ -1038,7 +1039,7 @@ struct drm_device {
59310
59311 /** \name Usage Counters */
59312 /*@{ */
59313 - int open_count; /**< Outstanding files open */
59314 + local_t open_count; /**< Outstanding files open */
59315 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59316 atomic_t vma_count; /**< Outstanding vma areas open */
59317 int buf_use; /**< Buffers in use -- cannot alloc */
59318 @@ -1049,7 +1050,7 @@ struct drm_device {
59319 /*@{ */
59320 unsigned long counters;
59321 enum drm_stat_type types[15];
59322 - atomic_t counts[15];
59323 + atomic_unchecked_t counts[15];
59324 /*@} */
59325
59326 struct list_head filelist;
59327 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59328 index 73b0712..0b7ef2f 100644
59329 --- a/include/drm/drm_crtc_helper.h
59330 +++ b/include/drm/drm_crtc_helper.h
59331 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59332
59333 /* disable crtc when not in use - more explicit than dpms off */
59334 void (*disable)(struct drm_crtc *crtc);
59335 -};
59336 +} __no_const;
59337
59338 struct drm_encoder_helper_funcs {
59339 void (*dpms)(struct drm_encoder *encoder, int mode);
59340 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59341 struct drm_connector *connector);
59342 /* disable encoder when not in use - more explicit than dpms off */
59343 void (*disable)(struct drm_encoder *encoder);
59344 -};
59345 +} __no_const;
59346
59347 struct drm_connector_helper_funcs {
59348 int (*get_modes)(struct drm_connector *connector);
59349 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59350 index 26c1f78..6722682 100644
59351 --- a/include/drm/ttm/ttm_memory.h
59352 +++ b/include/drm/ttm/ttm_memory.h
59353 @@ -47,7 +47,7 @@
59354
59355 struct ttm_mem_shrink {
59356 int (*do_shrink) (struct ttm_mem_shrink *);
59357 -};
59358 +} __no_const;
59359
59360 /**
59361 * struct ttm_mem_global - Global memory accounting structure.
59362 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59363 index e86dfca..40cc55f 100644
59364 --- a/include/linux/a.out.h
59365 +++ b/include/linux/a.out.h
59366 @@ -39,6 +39,14 @@ enum machine_type {
59367 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59368 };
59369
59370 +/* Constants for the N_FLAGS field */
59371 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59372 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59373 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59374 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59375 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59376 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59377 +
59378 #if !defined (N_MAGIC)
59379 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59380 #endif
59381 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59382 index 49a83ca..df96b54 100644
59383 --- a/include/linux/atmdev.h
59384 +++ b/include/linux/atmdev.h
59385 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59386 #endif
59387
59388 struct k_atm_aal_stats {
59389 -#define __HANDLE_ITEM(i) atomic_t i
59390 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59391 __AAL_STAT_ITEMS
59392 #undef __HANDLE_ITEM
59393 };
59394 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59395 index fd88a39..8a801b4 100644
59396 --- a/include/linux/binfmts.h
59397 +++ b/include/linux/binfmts.h
59398 @@ -18,7 +18,7 @@ struct pt_regs;
59399 #define BINPRM_BUF_SIZE 128
59400
59401 #ifdef __KERNEL__
59402 -#include <linux/list.h>
59403 +#include <linux/sched.h>
59404
59405 #define CORENAME_MAX_SIZE 128
59406
59407 @@ -58,6 +58,7 @@ struct linux_binprm {
59408 unsigned interp_flags;
59409 unsigned interp_data;
59410 unsigned long loader, exec;
59411 + char tcomm[TASK_COMM_LEN];
59412 };
59413
59414 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
59415 @@ -88,6 +89,7 @@ struct linux_binfmt {
59416 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59417 int (*load_shlib)(struct file *);
59418 int (*core_dump)(struct coredump_params *cprm);
59419 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59420 unsigned long min_coredump; /* minimal dump size */
59421 };
59422
59423 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59424 index 0ed1eb0..3ab569b 100644
59425 --- a/include/linux/blkdev.h
59426 +++ b/include/linux/blkdev.h
59427 @@ -1315,7 +1315,7 @@ struct block_device_operations {
59428 /* this callback is with swap_lock and sometimes page table lock held */
59429 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59430 struct module *owner;
59431 -};
59432 +} __do_const;
59433
59434 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59435 unsigned long);
59436 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59437 index 4d1a074..88f929a 100644
59438 --- a/include/linux/blktrace_api.h
59439 +++ b/include/linux/blktrace_api.h
59440 @@ -162,7 +162,7 @@ struct blk_trace {
59441 struct dentry *dir;
59442 struct dentry *dropped_file;
59443 struct dentry *msg_file;
59444 - atomic_t dropped;
59445 + atomic_unchecked_t dropped;
59446 };
59447
59448 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59449 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59450 index 83195fb..0b0f77d 100644
59451 --- a/include/linux/byteorder/little_endian.h
59452 +++ b/include/linux/byteorder/little_endian.h
59453 @@ -42,51 +42,51 @@
59454
59455 static inline __le64 __cpu_to_le64p(const __u64 *p)
59456 {
59457 - return (__force __le64)*p;
59458 + return (__force const __le64)*p;
59459 }
59460 static inline __u64 __le64_to_cpup(const __le64 *p)
59461 {
59462 - return (__force __u64)*p;
59463 + return (__force const __u64)*p;
59464 }
59465 static inline __le32 __cpu_to_le32p(const __u32 *p)
59466 {
59467 - return (__force __le32)*p;
59468 + return (__force const __le32)*p;
59469 }
59470 static inline __u32 __le32_to_cpup(const __le32 *p)
59471 {
59472 - return (__force __u32)*p;
59473 + return (__force const __u32)*p;
59474 }
59475 static inline __le16 __cpu_to_le16p(const __u16 *p)
59476 {
59477 - return (__force __le16)*p;
59478 + return (__force const __le16)*p;
59479 }
59480 static inline __u16 __le16_to_cpup(const __le16 *p)
59481 {
59482 - return (__force __u16)*p;
59483 + return (__force const __u16)*p;
59484 }
59485 static inline __be64 __cpu_to_be64p(const __u64 *p)
59486 {
59487 - return (__force __be64)__swab64p(p);
59488 + return (__force const __be64)__swab64p(p);
59489 }
59490 static inline __u64 __be64_to_cpup(const __be64 *p)
59491 {
59492 - return __swab64p((__u64 *)p);
59493 + return __swab64p((const __u64 *)p);
59494 }
59495 static inline __be32 __cpu_to_be32p(const __u32 *p)
59496 {
59497 - return (__force __be32)__swab32p(p);
59498 + return (__force const __be32)__swab32p(p);
59499 }
59500 static inline __u32 __be32_to_cpup(const __be32 *p)
59501 {
59502 - return __swab32p((__u32 *)p);
59503 + return __swab32p((const __u32 *)p);
59504 }
59505 static inline __be16 __cpu_to_be16p(const __u16 *p)
59506 {
59507 - return (__force __be16)__swab16p(p);
59508 + return (__force const __be16)__swab16p(p);
59509 }
59510 static inline __u16 __be16_to_cpup(const __be16 *p)
59511 {
59512 - return __swab16p((__u16 *)p);
59513 + return __swab16p((const __u16 *)p);
59514 }
59515 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59516 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59517 diff --git a/include/linux/cache.h b/include/linux/cache.h
59518 index 4c57065..4307975 100644
59519 --- a/include/linux/cache.h
59520 +++ b/include/linux/cache.h
59521 @@ -16,6 +16,10 @@
59522 #define __read_mostly
59523 #endif
59524
59525 +#ifndef __read_only
59526 +#define __read_only __read_mostly
59527 +#endif
59528 +
59529 #ifndef ____cacheline_aligned
59530 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59531 #endif
59532 diff --git a/include/linux/capability.h b/include/linux/capability.h
59533 index a63d13d..069bfd5 100644
59534 --- a/include/linux/capability.h
59535 +++ b/include/linux/capability.h
59536 @@ -548,6 +548,9 @@ extern bool capable(int cap);
59537 extern bool ns_capable(struct user_namespace *ns, int cap);
59538 extern bool task_ns_capable(struct task_struct *t, int cap);
59539 extern bool nsown_capable(int cap);
59540 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
59541 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59542 +extern bool capable_nolog(int cap);
59543
59544 /* audit system wants to get cap info from files as well */
59545 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59546 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59547 index 04ffb2e..6799180 100644
59548 --- a/include/linux/cleancache.h
59549 +++ b/include/linux/cleancache.h
59550 @@ -31,7 +31,7 @@ struct cleancache_ops {
59551 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
59552 void (*flush_inode)(int, struct cleancache_filekey);
59553 void (*flush_fs)(int);
59554 -};
59555 +} __no_const;
59556
59557 extern struct cleancache_ops
59558 cleancache_register_ops(struct cleancache_ops *ops);
59559 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59560 index dfadc96..c0e70c1 100644
59561 --- a/include/linux/compiler-gcc4.h
59562 +++ b/include/linux/compiler-gcc4.h
59563 @@ -31,6 +31,12 @@
59564
59565
59566 #if __GNUC_MINOR__ >= 5
59567 +
59568 +#ifdef CONSTIFY_PLUGIN
59569 +#define __no_const __attribute__((no_const))
59570 +#define __do_const __attribute__((do_const))
59571 +#endif
59572 +
59573 /*
59574 * Mark a position in code as unreachable. This can be used to
59575 * suppress control flow warnings after asm blocks that transfer
59576 @@ -46,6 +52,11 @@
59577 #define __noclone __attribute__((__noclone__))
59578
59579 #endif
59580 +
59581 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
59582 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
59583 +#define __bos0(ptr) __bos((ptr), 0)
59584 +#define __bos1(ptr) __bos((ptr), 1)
59585 #endif
59586
59587 #if __GNUC_MINOR__ > 0
59588 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
59589 index 320d6c9..8573a1c 100644
59590 --- a/include/linux/compiler.h
59591 +++ b/include/linux/compiler.h
59592 @@ -5,31 +5,62 @@
59593
59594 #ifdef __CHECKER__
59595 # define __user __attribute__((noderef, address_space(1)))
59596 +# define __force_user __force __user
59597 # define __kernel __attribute__((address_space(0)))
59598 +# define __force_kernel __force __kernel
59599 # define __safe __attribute__((safe))
59600 # define __force __attribute__((force))
59601 # define __nocast __attribute__((nocast))
59602 # define __iomem __attribute__((noderef, address_space(2)))
59603 +# define __force_iomem __force __iomem
59604 # define __acquires(x) __attribute__((context(x,0,1)))
59605 # define __releases(x) __attribute__((context(x,1,0)))
59606 # define __acquire(x) __context__(x,1)
59607 # define __release(x) __context__(x,-1)
59608 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
59609 # define __percpu __attribute__((noderef, address_space(3)))
59610 +# define __force_percpu __force __percpu
59611 #ifdef CONFIG_SPARSE_RCU_POINTER
59612 # define __rcu __attribute__((noderef, address_space(4)))
59613 +# define __force_rcu __force __rcu
59614 #else
59615 # define __rcu
59616 +# define __force_rcu
59617 #endif
59618 extern void __chk_user_ptr(const volatile void __user *);
59619 extern void __chk_io_ptr(const volatile void __iomem *);
59620 +#elif defined(CHECKER_PLUGIN)
59621 +//# define __user
59622 +//# define __force_user
59623 +//# define __kernel
59624 +//# define __force_kernel
59625 +# define __safe
59626 +# define __force
59627 +# define __nocast
59628 +# define __iomem
59629 +# define __force_iomem
59630 +# define __chk_user_ptr(x) (void)0
59631 +# define __chk_io_ptr(x) (void)0
59632 +# define __builtin_warning(x, y...) (1)
59633 +# define __acquires(x)
59634 +# define __releases(x)
59635 +# define __acquire(x) (void)0
59636 +# define __release(x) (void)0
59637 +# define __cond_lock(x,c) (c)
59638 +# define __percpu
59639 +# define __force_percpu
59640 +# define __rcu
59641 +# define __force_rcu
59642 #else
59643 # define __user
59644 +# define __force_user
59645 # define __kernel
59646 +# define __force_kernel
59647 # define __safe
59648 # define __force
59649 # define __nocast
59650 # define __iomem
59651 +# define __force_iomem
59652 # define __chk_user_ptr(x) (void)0
59653 # define __chk_io_ptr(x) (void)0
59654 # define __builtin_warning(x, y...) (1)
59655 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
59656 # define __release(x) (void)0
59657 # define __cond_lock(x,c) (c)
59658 # define __percpu
59659 +# define __force_percpu
59660 # define __rcu
59661 +# define __force_rcu
59662 #endif
59663
59664 #ifdef __KERNEL__
59665 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59666 # define __attribute_const__ /* unimplemented */
59667 #endif
59668
59669 +#ifndef __no_const
59670 +# define __no_const
59671 +#endif
59672 +
59673 +#ifndef __do_const
59674 +# define __do_const
59675 +#endif
59676 +
59677 /*
59678 * Tell gcc if a function is cold. The compiler will assume any path
59679 * directly leading to the call is unlikely.
59680 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59681 #define __cold
59682 #endif
59683
59684 +#ifndef __alloc_size
59685 +#define __alloc_size(...)
59686 +#endif
59687 +
59688 +#ifndef __bos
59689 +#define __bos(ptr, arg)
59690 +#endif
59691 +
59692 +#ifndef __bos0
59693 +#define __bos0(ptr)
59694 +#endif
59695 +
59696 +#ifndef __bos1
59697 +#define __bos1(ptr)
59698 +#endif
59699 +
59700 /* Simple shorthand for a section definition */
59701 #ifndef __section
59702 # define __section(S) __attribute__ ((__section__(#S)))
59703 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
59704 * use is to mediate communication between process-level code and irq/NMI
59705 * handlers, all running on the same CPU.
59706 */
59707 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
59708 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
59709 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
59710
59711 #endif /* __LINUX_COMPILER_H */
59712 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
59713 index e9eaec5..bfeb9bb 100644
59714 --- a/include/linux/cpuset.h
59715 +++ b/include/linux/cpuset.h
59716 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
59717 * nodemask.
59718 */
59719 smp_mb();
59720 - --ACCESS_ONCE(current->mems_allowed_change_disable);
59721 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
59722 }
59723
59724 static inline void set_mems_allowed(nodemask_t nodemask)
59725 diff --git a/include/linux/cred.h b/include/linux/cred.h
59726 index 4030896..8d6f342 100644
59727 --- a/include/linux/cred.h
59728 +++ b/include/linux/cred.h
59729 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
59730 static inline void validate_process_creds(void)
59731 {
59732 }
59733 +static inline void validate_task_creds(struct task_struct *task)
59734 +{
59735 +}
59736 #endif
59737
59738 /**
59739 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
59740 index 8a94217..15d49e3 100644
59741 --- a/include/linux/crypto.h
59742 +++ b/include/linux/crypto.h
59743 @@ -365,7 +365,7 @@ struct cipher_tfm {
59744 const u8 *key, unsigned int keylen);
59745 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59746 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
59747 -};
59748 +} __no_const;
59749
59750 struct hash_tfm {
59751 int (*init)(struct hash_desc *desc);
59752 @@ -386,13 +386,13 @@ struct compress_tfm {
59753 int (*cot_decompress)(struct crypto_tfm *tfm,
59754 const u8 *src, unsigned int slen,
59755 u8 *dst, unsigned int *dlen);
59756 -};
59757 +} __no_const;
59758
59759 struct rng_tfm {
59760 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
59761 unsigned int dlen);
59762 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
59763 -};
59764 +} __no_const;
59765
59766 #define crt_ablkcipher crt_u.ablkcipher
59767 #define crt_aead crt_u.aead
59768 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
59769 index 7925bf0..d5143d2 100644
59770 --- a/include/linux/decompress/mm.h
59771 +++ b/include/linux/decompress/mm.h
59772 @@ -77,7 +77,7 @@ static void free(void *where)
59773 * warnings when not needed (indeed large_malloc / large_free are not
59774 * needed by inflate */
59775
59776 -#define malloc(a) kmalloc(a, GFP_KERNEL)
59777 +#define malloc(a) kmalloc((a), GFP_KERNEL)
59778 #define free(a) kfree(a)
59779
59780 #define large_malloc(a) vmalloc(a)
59781 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
59782 index e13117c..e9fc938 100644
59783 --- a/include/linux/dma-mapping.h
59784 +++ b/include/linux/dma-mapping.h
59785 @@ -46,7 +46,7 @@ struct dma_map_ops {
59786 u64 (*get_required_mask)(struct device *dev);
59787 #endif
59788 int is_phys;
59789 -};
59790 +} __do_const;
59791
59792 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
59793
59794 diff --git a/include/linux/efi.h b/include/linux/efi.h
59795 index 2362a0b..cfaf8fcc 100644
59796 --- a/include/linux/efi.h
59797 +++ b/include/linux/efi.h
59798 @@ -446,7 +446,7 @@ struct efivar_operations {
59799 efi_get_variable_t *get_variable;
59800 efi_get_next_variable_t *get_next_variable;
59801 efi_set_variable_t *set_variable;
59802 -};
59803 +} __no_const;
59804
59805 struct efivars {
59806 /*
59807 diff --git a/include/linux/elf.h b/include/linux/elf.h
59808 index 31f0508..5421c01 100644
59809 --- a/include/linux/elf.h
59810 +++ b/include/linux/elf.h
59811 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
59812 #define PT_GNU_EH_FRAME 0x6474e550
59813
59814 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
59815 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
59816 +
59817 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
59818 +
59819 +/* Constants for the e_flags field */
59820 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59821 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
59822 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
59823 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
59824 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59825 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59826
59827 /*
59828 * Extended Numbering
59829 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
59830 #define DT_DEBUG 21
59831 #define DT_TEXTREL 22
59832 #define DT_JMPREL 23
59833 +#define DT_FLAGS 30
59834 + #define DF_TEXTREL 0x00000004
59835 #define DT_ENCODING 32
59836 #define OLD_DT_LOOS 0x60000000
59837 #define DT_LOOS 0x6000000d
59838 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
59839 #define PF_W 0x2
59840 #define PF_X 0x1
59841
59842 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
59843 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
59844 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
59845 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
59846 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
59847 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
59848 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
59849 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
59850 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
59851 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
59852 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
59853 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
59854 +
59855 typedef struct elf32_phdr{
59856 Elf32_Word p_type;
59857 Elf32_Off p_offset;
59858 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
59859 #define EI_OSABI 7
59860 #define EI_PAD 8
59861
59862 +#define EI_PAX 14
59863 +
59864 #define ELFMAG0 0x7f /* EI_MAG */
59865 #define ELFMAG1 'E'
59866 #define ELFMAG2 'L'
59867 @@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
59868 #define elf_note elf32_note
59869 #define elf_addr_t Elf32_Off
59870 #define Elf_Half Elf32_Half
59871 +#define elf_dyn Elf32_Dyn
59872
59873 #else
59874
59875 @@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
59876 #define elf_note elf64_note
59877 #define elf_addr_t Elf64_Off
59878 #define Elf_Half Elf64_Half
59879 +#define elf_dyn Elf64_Dyn
59880
59881 #endif
59882
59883 diff --git a/include/linux/filter.h b/include/linux/filter.h
59884 index 8eeb205..d59bfa2 100644
59885 --- a/include/linux/filter.h
59886 +++ b/include/linux/filter.h
59887 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
59888
59889 struct sk_buff;
59890 struct sock;
59891 +struct bpf_jit_work;
59892
59893 struct sk_filter
59894 {
59895 @@ -141,6 +142,9 @@ struct sk_filter
59896 unsigned int len; /* Number of filter blocks */
59897 unsigned int (*bpf_func)(const struct sk_buff *skb,
59898 const struct sock_filter *filter);
59899 +#ifdef CONFIG_BPF_JIT
59900 + struct bpf_jit_work *work;
59901 +#endif
59902 struct rcu_head rcu;
59903 struct sock_filter insns[0];
59904 };
59905 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
59906 index 84ccf8e..2e9b14c 100644
59907 --- a/include/linux/firewire.h
59908 +++ b/include/linux/firewire.h
59909 @@ -428,7 +428,7 @@ struct fw_iso_context {
59910 union {
59911 fw_iso_callback_t sc;
59912 fw_iso_mc_callback_t mc;
59913 - } callback;
59914 + } __no_const callback;
59915 void *callback_data;
59916 };
59917
59918 diff --git a/include/linux/fs.h b/include/linux/fs.h
59919 index 10b2288..09180e4 100644
59920 --- a/include/linux/fs.h
59921 +++ b/include/linux/fs.h
59922 @@ -1609,7 +1609,8 @@ struct file_operations {
59923 int (*setlease)(struct file *, long, struct file_lock **);
59924 long (*fallocate)(struct file *file, int mode, loff_t offset,
59925 loff_t len);
59926 -};
59927 +} __do_const;
59928 +typedef struct file_operations __no_const file_operations_no_const;
59929
59930 struct inode_operations {
59931 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
59932 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
59933 index 003dc0f..3c4ea97 100644
59934 --- a/include/linux/fs_struct.h
59935 +++ b/include/linux/fs_struct.h
59936 @@ -6,7 +6,7 @@
59937 #include <linux/seqlock.h>
59938
59939 struct fs_struct {
59940 - int users;
59941 + atomic_t users;
59942 spinlock_t lock;
59943 seqcount_t seq;
59944 int umask;
59945 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
59946 index ce31408..b1ad003 100644
59947 --- a/include/linux/fscache-cache.h
59948 +++ b/include/linux/fscache-cache.h
59949 @@ -102,7 +102,7 @@ struct fscache_operation {
59950 fscache_operation_release_t release;
59951 };
59952
59953 -extern atomic_t fscache_op_debug_id;
59954 +extern atomic_unchecked_t fscache_op_debug_id;
59955 extern void fscache_op_work_func(struct work_struct *work);
59956
59957 extern void fscache_enqueue_operation(struct fscache_operation *);
59958 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
59959 {
59960 INIT_WORK(&op->work, fscache_op_work_func);
59961 atomic_set(&op->usage, 1);
59962 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
59963 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59964 op->processor = processor;
59965 op->release = release;
59966 INIT_LIST_HEAD(&op->pend_link);
59967 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
59968 index 2a53f10..0187fdf 100644
59969 --- a/include/linux/fsnotify.h
59970 +++ b/include/linux/fsnotify.h
59971 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
59972 */
59973 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
59974 {
59975 - return kstrdup(name, GFP_KERNEL);
59976 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
59977 }
59978
59979 /*
59980 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
59981 index 91d0e0a3..035666b 100644
59982 --- a/include/linux/fsnotify_backend.h
59983 +++ b/include/linux/fsnotify_backend.h
59984 @@ -105,6 +105,7 @@ struct fsnotify_ops {
59985 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
59986 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
59987 };
59988 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
59989
59990 /*
59991 * A group is a "thing" that wants to receive notification about filesystem
59992 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
59993 index c3da42d..c70e0df 100644
59994 --- a/include/linux/ftrace_event.h
59995 +++ b/include/linux/ftrace_event.h
59996 @@ -97,7 +97,7 @@ struct trace_event_functions {
59997 trace_print_func raw;
59998 trace_print_func hex;
59999 trace_print_func binary;
60000 -};
60001 +} __no_const;
60002
60003 struct trace_event {
60004 struct hlist_node node;
60005 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60006 extern int trace_add_event_call(struct ftrace_event_call *call);
60007 extern void trace_remove_event_call(struct ftrace_event_call *call);
60008
60009 -#define is_signed_type(type) (((type)(-1)) < 0)
60010 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60011
60012 int trace_set_clr_event(const char *system, const char *event, int set);
60013
60014 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60015 index 6d18f35..ab71e2c 100644
60016 --- a/include/linux/genhd.h
60017 +++ b/include/linux/genhd.h
60018 @@ -185,7 +185,7 @@ struct gendisk {
60019 struct kobject *slave_dir;
60020
60021 struct timer_rand_state *random;
60022 - atomic_t sync_io; /* RAID */
60023 + atomic_unchecked_t sync_io; /* RAID */
60024 struct disk_events *ev;
60025 #ifdef CONFIG_BLK_DEV_INTEGRITY
60026 struct blk_integrity *integrity;
60027 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60028 new file mode 100644
60029 index 0000000..8a130b6
60030 --- /dev/null
60031 +++ b/include/linux/gracl.h
60032 @@ -0,0 +1,319 @@
60033 +#ifndef GR_ACL_H
60034 +#define GR_ACL_H
60035 +
60036 +#include <linux/grdefs.h>
60037 +#include <linux/resource.h>
60038 +#include <linux/capability.h>
60039 +#include <linux/dcache.h>
60040 +#include <asm/resource.h>
60041 +
60042 +/* Major status information */
60043 +
60044 +#define GR_VERSION "grsecurity 2.9"
60045 +#define GRSECURITY_VERSION 0x2900
60046 +
60047 +enum {
60048 + GR_SHUTDOWN = 0,
60049 + GR_ENABLE = 1,
60050 + GR_SPROLE = 2,
60051 + GR_RELOAD = 3,
60052 + GR_SEGVMOD = 4,
60053 + GR_STATUS = 5,
60054 + GR_UNSPROLE = 6,
60055 + GR_PASSSET = 7,
60056 + GR_SPROLEPAM = 8,
60057 +};
60058 +
60059 +/* Password setup definitions
60060 + * kernel/grhash.c */
60061 +enum {
60062 + GR_PW_LEN = 128,
60063 + GR_SALT_LEN = 16,
60064 + GR_SHA_LEN = 32,
60065 +};
60066 +
60067 +enum {
60068 + GR_SPROLE_LEN = 64,
60069 +};
60070 +
60071 +enum {
60072 + GR_NO_GLOB = 0,
60073 + GR_REG_GLOB,
60074 + GR_CREATE_GLOB
60075 +};
60076 +
60077 +#define GR_NLIMITS 32
60078 +
60079 +/* Begin Data Structures */
60080 +
60081 +struct sprole_pw {
60082 + unsigned char *rolename;
60083 + unsigned char salt[GR_SALT_LEN];
60084 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60085 +};
60086 +
60087 +struct name_entry {
60088 + __u32 key;
60089 + ino_t inode;
60090 + dev_t device;
60091 + char *name;
60092 + __u16 len;
60093 + __u8 deleted;
60094 + struct name_entry *prev;
60095 + struct name_entry *next;
60096 +};
60097 +
60098 +struct inodev_entry {
60099 + struct name_entry *nentry;
60100 + struct inodev_entry *prev;
60101 + struct inodev_entry *next;
60102 +};
60103 +
60104 +struct acl_role_db {
60105 + struct acl_role_label **r_hash;
60106 + __u32 r_size;
60107 +};
60108 +
60109 +struct inodev_db {
60110 + struct inodev_entry **i_hash;
60111 + __u32 i_size;
60112 +};
60113 +
60114 +struct name_db {
60115 + struct name_entry **n_hash;
60116 + __u32 n_size;
60117 +};
60118 +
60119 +struct crash_uid {
60120 + uid_t uid;
60121 + unsigned long expires;
60122 +};
60123 +
60124 +struct gr_hash_struct {
60125 + void **table;
60126 + void **nametable;
60127 + void *first;
60128 + __u32 table_size;
60129 + __u32 used_size;
60130 + int type;
60131 +};
60132 +
60133 +/* Userspace Grsecurity ACL data structures */
60134 +
60135 +struct acl_subject_label {
60136 + char *filename;
60137 + ino_t inode;
60138 + dev_t device;
60139 + __u32 mode;
60140 + kernel_cap_t cap_mask;
60141 + kernel_cap_t cap_lower;
60142 + kernel_cap_t cap_invert_audit;
60143 +
60144 + struct rlimit res[GR_NLIMITS];
60145 + __u32 resmask;
60146 +
60147 + __u8 user_trans_type;
60148 + __u8 group_trans_type;
60149 + uid_t *user_transitions;
60150 + gid_t *group_transitions;
60151 + __u16 user_trans_num;
60152 + __u16 group_trans_num;
60153 +
60154 + __u32 sock_families[2];
60155 + __u32 ip_proto[8];
60156 + __u32 ip_type;
60157 + struct acl_ip_label **ips;
60158 + __u32 ip_num;
60159 + __u32 inaddr_any_override;
60160 +
60161 + __u32 crashes;
60162 + unsigned long expires;
60163 +
60164 + struct acl_subject_label *parent_subject;
60165 + struct gr_hash_struct *hash;
60166 + struct acl_subject_label *prev;
60167 + struct acl_subject_label *next;
60168 +
60169 + struct acl_object_label **obj_hash;
60170 + __u32 obj_hash_size;
60171 + __u16 pax_flags;
60172 +};
60173 +
60174 +struct role_allowed_ip {
60175 + __u32 addr;
60176 + __u32 netmask;
60177 +
60178 + struct role_allowed_ip *prev;
60179 + struct role_allowed_ip *next;
60180 +};
60181 +
60182 +struct role_transition {
60183 + char *rolename;
60184 +
60185 + struct role_transition *prev;
60186 + struct role_transition *next;
60187 +};
60188 +
60189 +struct acl_role_label {
60190 + char *rolename;
60191 + uid_t uidgid;
60192 + __u16 roletype;
60193 +
60194 + __u16 auth_attempts;
60195 + unsigned long expires;
60196 +
60197 + struct acl_subject_label *root_label;
60198 + struct gr_hash_struct *hash;
60199 +
60200 + struct acl_role_label *prev;
60201 + struct acl_role_label *next;
60202 +
60203 + struct role_transition *transitions;
60204 + struct role_allowed_ip *allowed_ips;
60205 + uid_t *domain_children;
60206 + __u16 domain_child_num;
60207 +
60208 + umode_t umask;
60209 +
60210 + struct acl_subject_label **subj_hash;
60211 + __u32 subj_hash_size;
60212 +};
60213 +
60214 +struct user_acl_role_db {
60215 + struct acl_role_label **r_table;
60216 + __u32 num_pointers; /* Number of allocations to track */
60217 + __u32 num_roles; /* Number of roles */
60218 + __u32 num_domain_children; /* Number of domain children */
60219 + __u32 num_subjects; /* Number of subjects */
60220 + __u32 num_objects; /* Number of objects */
60221 +};
60222 +
60223 +struct acl_object_label {
60224 + char *filename;
60225 + ino_t inode;
60226 + dev_t device;
60227 + __u32 mode;
60228 +
60229 + struct acl_subject_label *nested;
60230 + struct acl_object_label *globbed;
60231 +
60232 + /* next two structures not used */
60233 +
60234 + struct acl_object_label *prev;
60235 + struct acl_object_label *next;
60236 +};
60237 +
60238 +struct acl_ip_label {
60239 + char *iface;
60240 + __u32 addr;
60241 + __u32 netmask;
60242 + __u16 low, high;
60243 + __u8 mode;
60244 + __u32 type;
60245 + __u32 proto[8];
60246 +
60247 + /* next two structures not used */
60248 +
60249 + struct acl_ip_label *prev;
60250 + struct acl_ip_label *next;
60251 +};
60252 +
60253 +struct gr_arg {
60254 + struct user_acl_role_db role_db;
60255 + unsigned char pw[GR_PW_LEN];
60256 + unsigned char salt[GR_SALT_LEN];
60257 + unsigned char sum[GR_SHA_LEN];
60258 + unsigned char sp_role[GR_SPROLE_LEN];
60259 + struct sprole_pw *sprole_pws;
60260 + dev_t segv_device;
60261 + ino_t segv_inode;
60262 + uid_t segv_uid;
60263 + __u16 num_sprole_pws;
60264 + __u16 mode;
60265 +};
60266 +
60267 +struct gr_arg_wrapper {
60268 + struct gr_arg *arg;
60269 + __u32 version;
60270 + __u32 size;
60271 +};
60272 +
60273 +struct subject_map {
60274 + struct acl_subject_label *user;
60275 + struct acl_subject_label *kernel;
60276 + struct subject_map *prev;
60277 + struct subject_map *next;
60278 +};
60279 +
60280 +struct acl_subj_map_db {
60281 + struct subject_map **s_hash;
60282 + __u32 s_size;
60283 +};
60284 +
60285 +/* End Data Structures Section */
60286 +
60287 +/* Hash functions generated by empirical testing by Brad Spengler
60288 + Makes good use of the low bits of the inode. Generally 0-1 times
60289 + in loop for successful match. 0-3 for unsuccessful match.
60290 + Shift/add algorithm with modulus of table size and an XOR*/
60291 +
60292 +static __inline__ unsigned int
60293 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60294 +{
60295 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60296 +}
60297 +
60298 + static __inline__ unsigned int
60299 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60300 +{
60301 + return ((const unsigned long)userp % sz);
60302 +}
60303 +
60304 +static __inline__ unsigned int
60305 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60306 +{
60307 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60308 +}
60309 +
60310 +static __inline__ unsigned int
60311 +nhash(const char *name, const __u16 len, const unsigned int sz)
60312 +{
60313 + return full_name_hash((const unsigned char *)name, len) % sz;
60314 +}
60315 +
60316 +#define FOR_EACH_ROLE_START(role) \
60317 + role = role_list; \
60318 + while (role) {
60319 +
60320 +#define FOR_EACH_ROLE_END(role) \
60321 + role = role->prev; \
60322 + }
60323 +
60324 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60325 + subj = NULL; \
60326 + iter = 0; \
60327 + while (iter < role->subj_hash_size) { \
60328 + if (subj == NULL) \
60329 + subj = role->subj_hash[iter]; \
60330 + if (subj == NULL) { \
60331 + iter++; \
60332 + continue; \
60333 + }
60334 +
60335 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60336 + subj = subj->next; \
60337 + if (subj == NULL) \
60338 + iter++; \
60339 + }
60340 +
60341 +
60342 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60343 + subj = role->hash->first; \
60344 + while (subj != NULL) {
60345 +
60346 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60347 + subj = subj->next; \
60348 + }
60349 +
60350 +#endif
60351 +
60352 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60353 new file mode 100644
60354 index 0000000..323ecf2
60355 --- /dev/null
60356 +++ b/include/linux/gralloc.h
60357 @@ -0,0 +1,9 @@
60358 +#ifndef __GRALLOC_H
60359 +#define __GRALLOC_H
60360 +
60361 +void acl_free_all(void);
60362 +int acl_alloc_stack_init(unsigned long size);
60363 +void *acl_alloc(unsigned long len);
60364 +void *acl_alloc_num(unsigned long num, unsigned long len);
60365 +
60366 +#endif
60367 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60368 new file mode 100644
60369 index 0000000..b30e9bc
60370 --- /dev/null
60371 +++ b/include/linux/grdefs.h
60372 @@ -0,0 +1,140 @@
60373 +#ifndef GRDEFS_H
60374 +#define GRDEFS_H
60375 +
60376 +/* Begin grsecurity status declarations */
60377 +
60378 +enum {
60379 + GR_READY = 0x01,
60380 + GR_STATUS_INIT = 0x00 // disabled state
60381 +};
60382 +
60383 +/* Begin ACL declarations */
60384 +
60385 +/* Role flags */
60386 +
60387 +enum {
60388 + GR_ROLE_USER = 0x0001,
60389 + GR_ROLE_GROUP = 0x0002,
60390 + GR_ROLE_DEFAULT = 0x0004,
60391 + GR_ROLE_SPECIAL = 0x0008,
60392 + GR_ROLE_AUTH = 0x0010,
60393 + GR_ROLE_NOPW = 0x0020,
60394 + GR_ROLE_GOD = 0x0040,
60395 + GR_ROLE_LEARN = 0x0080,
60396 + GR_ROLE_TPE = 0x0100,
60397 + GR_ROLE_DOMAIN = 0x0200,
60398 + GR_ROLE_PAM = 0x0400,
60399 + GR_ROLE_PERSIST = 0x0800
60400 +};
60401 +
60402 +/* ACL Subject and Object mode flags */
60403 +enum {
60404 + GR_DELETED = 0x80000000
60405 +};
60406 +
60407 +/* ACL Object-only mode flags */
60408 +enum {
60409 + GR_READ = 0x00000001,
60410 + GR_APPEND = 0x00000002,
60411 + GR_WRITE = 0x00000004,
60412 + GR_EXEC = 0x00000008,
60413 + GR_FIND = 0x00000010,
60414 + GR_INHERIT = 0x00000020,
60415 + GR_SETID = 0x00000040,
60416 + GR_CREATE = 0x00000080,
60417 + GR_DELETE = 0x00000100,
60418 + GR_LINK = 0x00000200,
60419 + GR_AUDIT_READ = 0x00000400,
60420 + GR_AUDIT_APPEND = 0x00000800,
60421 + GR_AUDIT_WRITE = 0x00001000,
60422 + GR_AUDIT_EXEC = 0x00002000,
60423 + GR_AUDIT_FIND = 0x00004000,
60424 + GR_AUDIT_INHERIT= 0x00008000,
60425 + GR_AUDIT_SETID = 0x00010000,
60426 + GR_AUDIT_CREATE = 0x00020000,
60427 + GR_AUDIT_DELETE = 0x00040000,
60428 + GR_AUDIT_LINK = 0x00080000,
60429 + GR_PTRACERD = 0x00100000,
60430 + GR_NOPTRACE = 0x00200000,
60431 + GR_SUPPRESS = 0x00400000,
60432 + GR_NOLEARN = 0x00800000,
60433 + GR_INIT_TRANSFER= 0x01000000
60434 +};
60435 +
60436 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60437 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60438 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60439 +
60440 +/* ACL subject-only mode flags */
60441 +enum {
60442 + GR_KILL = 0x00000001,
60443 + GR_VIEW = 0x00000002,
60444 + GR_PROTECTED = 0x00000004,
60445 + GR_LEARN = 0x00000008,
60446 + GR_OVERRIDE = 0x00000010,
60447 + /* just a placeholder, this mode is only used in userspace */
60448 + GR_DUMMY = 0x00000020,
60449 + GR_PROTSHM = 0x00000040,
60450 + GR_KILLPROC = 0x00000080,
60451 + GR_KILLIPPROC = 0x00000100,
60452 + /* just a placeholder, this mode is only used in userspace */
60453 + GR_NOTROJAN = 0x00000200,
60454 + GR_PROTPROCFD = 0x00000400,
60455 + GR_PROCACCT = 0x00000800,
60456 + GR_RELAXPTRACE = 0x00001000,
60457 + GR_NESTED = 0x00002000,
60458 + GR_INHERITLEARN = 0x00004000,
60459 + GR_PROCFIND = 0x00008000,
60460 + GR_POVERRIDE = 0x00010000,
60461 + GR_KERNELAUTH = 0x00020000,
60462 + GR_ATSECURE = 0x00040000,
60463 + GR_SHMEXEC = 0x00080000
60464 +};
60465 +
60466 +enum {
60467 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60468 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60469 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60470 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60471 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60472 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60473 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60474 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60475 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60476 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60477 +};
60478 +
60479 +enum {
60480 + GR_ID_USER = 0x01,
60481 + GR_ID_GROUP = 0x02,
60482 +};
60483 +
60484 +enum {
60485 + GR_ID_ALLOW = 0x01,
60486 + GR_ID_DENY = 0x02,
60487 +};
60488 +
60489 +#define GR_CRASH_RES 31
60490 +#define GR_UIDTABLE_MAX 500
60491 +
60492 +/* begin resource learning section */
60493 +enum {
60494 + GR_RLIM_CPU_BUMP = 60,
60495 + GR_RLIM_FSIZE_BUMP = 50000,
60496 + GR_RLIM_DATA_BUMP = 10000,
60497 + GR_RLIM_STACK_BUMP = 1000,
60498 + GR_RLIM_CORE_BUMP = 10000,
60499 + GR_RLIM_RSS_BUMP = 500000,
60500 + GR_RLIM_NPROC_BUMP = 1,
60501 + GR_RLIM_NOFILE_BUMP = 5,
60502 + GR_RLIM_MEMLOCK_BUMP = 50000,
60503 + GR_RLIM_AS_BUMP = 500000,
60504 + GR_RLIM_LOCKS_BUMP = 2,
60505 + GR_RLIM_SIGPENDING_BUMP = 5,
60506 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60507 + GR_RLIM_NICE_BUMP = 1,
60508 + GR_RLIM_RTPRIO_BUMP = 1,
60509 + GR_RLIM_RTTIME_BUMP = 1000000
60510 +};
60511 +
60512 +#endif
60513 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60514 new file mode 100644
60515 index 0000000..da390f1
60516 --- /dev/null
60517 +++ b/include/linux/grinternal.h
60518 @@ -0,0 +1,221 @@
60519 +#ifndef __GRINTERNAL_H
60520 +#define __GRINTERNAL_H
60521 +
60522 +#ifdef CONFIG_GRKERNSEC
60523 +
60524 +#include <linux/fs.h>
60525 +#include <linux/mnt_namespace.h>
60526 +#include <linux/nsproxy.h>
60527 +#include <linux/gracl.h>
60528 +#include <linux/grdefs.h>
60529 +#include <linux/grmsg.h>
60530 +
60531 +void gr_add_learn_entry(const char *fmt, ...)
60532 + __attribute__ ((format (printf, 1, 2)));
60533 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60534 + const struct vfsmount *mnt);
60535 +__u32 gr_check_create(const struct dentry *new_dentry,
60536 + const struct dentry *parent,
60537 + const struct vfsmount *mnt, const __u32 mode);
60538 +int gr_check_protected_task(const struct task_struct *task);
60539 +__u32 to_gr_audit(const __u32 reqmode);
60540 +int gr_set_acls(const int type);
60541 +int gr_apply_subject_to_task(struct task_struct *task);
60542 +int gr_acl_is_enabled(void);
60543 +char gr_roletype_to_char(void);
60544 +
60545 +void gr_handle_alertkill(struct task_struct *task);
60546 +char *gr_to_filename(const struct dentry *dentry,
60547 + const struct vfsmount *mnt);
60548 +char *gr_to_filename1(const struct dentry *dentry,
60549 + const struct vfsmount *mnt);
60550 +char *gr_to_filename2(const struct dentry *dentry,
60551 + const struct vfsmount *mnt);
60552 +char *gr_to_filename3(const struct dentry *dentry,
60553 + const struct vfsmount *mnt);
60554 +
60555 +extern int grsec_enable_ptrace_readexec;
60556 +extern int grsec_enable_harden_ptrace;
60557 +extern int grsec_enable_link;
60558 +extern int grsec_enable_fifo;
60559 +extern int grsec_enable_execve;
60560 +extern int grsec_enable_shm;
60561 +extern int grsec_enable_execlog;
60562 +extern int grsec_enable_signal;
60563 +extern int grsec_enable_audit_ptrace;
60564 +extern int grsec_enable_forkfail;
60565 +extern int grsec_enable_time;
60566 +extern int grsec_enable_rofs;
60567 +extern int grsec_enable_chroot_shmat;
60568 +extern int grsec_enable_chroot_mount;
60569 +extern int grsec_enable_chroot_double;
60570 +extern int grsec_enable_chroot_pivot;
60571 +extern int grsec_enable_chroot_chdir;
60572 +extern int grsec_enable_chroot_chmod;
60573 +extern int grsec_enable_chroot_mknod;
60574 +extern int grsec_enable_chroot_fchdir;
60575 +extern int grsec_enable_chroot_nice;
60576 +extern int grsec_enable_chroot_execlog;
60577 +extern int grsec_enable_chroot_caps;
60578 +extern int grsec_enable_chroot_sysctl;
60579 +extern int grsec_enable_chroot_unix;
60580 +extern int grsec_enable_tpe;
60581 +extern int grsec_tpe_gid;
60582 +extern int grsec_enable_tpe_all;
60583 +extern int grsec_enable_tpe_invert;
60584 +extern int grsec_enable_socket_all;
60585 +extern int grsec_socket_all_gid;
60586 +extern int grsec_enable_socket_client;
60587 +extern int grsec_socket_client_gid;
60588 +extern int grsec_enable_socket_server;
60589 +extern int grsec_socket_server_gid;
60590 +extern int grsec_audit_gid;
60591 +extern int grsec_enable_group;
60592 +extern int grsec_enable_audit_textrel;
60593 +extern int grsec_enable_log_rwxmaps;
60594 +extern int grsec_enable_mount;
60595 +extern int grsec_enable_chdir;
60596 +extern int grsec_resource_logging;
60597 +extern int grsec_enable_blackhole;
60598 +extern int grsec_lastack_retries;
60599 +extern int grsec_enable_brute;
60600 +extern int grsec_lock;
60601 +
60602 +extern spinlock_t grsec_alert_lock;
60603 +extern unsigned long grsec_alert_wtime;
60604 +extern unsigned long grsec_alert_fyet;
60605 +
60606 +extern spinlock_t grsec_audit_lock;
60607 +
60608 +extern rwlock_t grsec_exec_file_lock;
60609 +
60610 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
60611 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
60612 + (tsk)->exec_file->f_vfsmnt) : "/")
60613 +
60614 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
60615 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
60616 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60617 +
60618 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
60619 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
60620 + (tsk)->exec_file->f_vfsmnt) : "/")
60621 +
60622 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
60623 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
60624 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
60625 +
60626 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
60627 +
60628 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
60629 +
60630 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
60631 + (task)->pid, (cred)->uid, \
60632 + (cred)->euid, (cred)->gid, (cred)->egid, \
60633 + gr_parent_task_fullpath(task), \
60634 + (task)->real_parent->comm, (task)->real_parent->pid, \
60635 + (pcred)->uid, (pcred)->euid, \
60636 + (pcred)->gid, (pcred)->egid
60637 +
60638 +#define GR_CHROOT_CAPS {{ \
60639 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
60640 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
60641 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
60642 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
60643 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
60644 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
60645 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
60646 +
60647 +#define security_learn(normal_msg,args...) \
60648 +({ \
60649 + read_lock(&grsec_exec_file_lock); \
60650 + gr_add_learn_entry(normal_msg "\n", ## args); \
60651 + read_unlock(&grsec_exec_file_lock); \
60652 +})
60653 +
60654 +enum {
60655 + GR_DO_AUDIT,
60656 + GR_DONT_AUDIT,
60657 + /* used for non-audit messages that we shouldn't kill the task on */
60658 + GR_DONT_AUDIT_GOOD
60659 +};
60660 +
60661 +enum {
60662 + GR_TTYSNIFF,
60663 + GR_RBAC,
60664 + GR_RBAC_STR,
60665 + GR_STR_RBAC,
60666 + GR_RBAC_MODE2,
60667 + GR_RBAC_MODE3,
60668 + GR_FILENAME,
60669 + GR_SYSCTL_HIDDEN,
60670 + GR_NOARGS,
60671 + GR_ONE_INT,
60672 + GR_ONE_INT_TWO_STR,
60673 + GR_ONE_STR,
60674 + GR_STR_INT,
60675 + GR_TWO_STR_INT,
60676 + GR_TWO_INT,
60677 + GR_TWO_U64,
60678 + GR_THREE_INT,
60679 + GR_FIVE_INT_TWO_STR,
60680 + GR_TWO_STR,
60681 + GR_THREE_STR,
60682 + GR_FOUR_STR,
60683 + GR_STR_FILENAME,
60684 + GR_FILENAME_STR,
60685 + GR_FILENAME_TWO_INT,
60686 + GR_FILENAME_TWO_INT_STR,
60687 + GR_TEXTREL,
60688 + GR_PTRACE,
60689 + GR_RESOURCE,
60690 + GR_CAP,
60691 + GR_SIG,
60692 + GR_SIG2,
60693 + GR_CRASH1,
60694 + GR_CRASH2,
60695 + GR_PSACCT,
60696 + GR_RWXMAP
60697 +};
60698 +
60699 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
60700 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
60701 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
60702 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
60703 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
60704 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
60705 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
60706 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
60707 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
60708 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
60709 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
60710 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
60711 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
60712 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
60713 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
60714 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
60715 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
60716 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
60717 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
60718 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
60719 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
60720 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
60721 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
60722 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
60723 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
60724 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
60725 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
60726 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
60727 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
60728 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
60729 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
60730 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
60731 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
60732 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
60733 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
60734 +
60735 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
60736 +
60737 +#endif
60738 +
60739 +#endif
60740 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
60741 new file mode 100644
60742 index 0000000..ae576a1
60743 --- /dev/null
60744 +++ b/include/linux/grmsg.h
60745 @@ -0,0 +1,109 @@
60746 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
60747 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
60748 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
60749 +#define GR_STOPMOD_MSG "denied modification of module state by "
60750 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
60751 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
60752 +#define GR_IOPERM_MSG "denied use of ioperm() by "
60753 +#define GR_IOPL_MSG "denied use of iopl() by "
60754 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
60755 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
60756 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
60757 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
60758 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
60759 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
60760 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
60761 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
60762 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
60763 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
60764 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
60765 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
60766 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
60767 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
60768 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
60769 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
60770 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
60771 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
60772 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
60773 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
60774 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
60775 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
60776 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
60777 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
60778 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
60779 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
60780 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
60781 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
60782 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
60783 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
60784 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
60785 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
60786 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
60787 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
60788 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
60789 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
60790 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
60791 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
60792 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
60793 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
60794 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
60795 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
60796 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
60797 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
60798 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
60799 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
60800 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
60801 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
60802 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
60803 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
60804 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
60805 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
60806 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
60807 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
60808 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
60809 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
60810 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
60811 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
60812 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
60813 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
60814 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
60815 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
60816 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
60817 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
60818 +#define GR_NICE_CHROOT_MSG "denied priority change by "
60819 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
60820 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
60821 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
60822 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
60823 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
60824 +#define GR_TIME_MSG "time set by "
60825 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
60826 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
60827 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
60828 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
60829 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
60830 +#define GR_BIND_MSG "denied bind() by "
60831 +#define GR_CONNECT_MSG "denied connect() by "
60832 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
60833 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
60834 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
60835 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
60836 +#define GR_CAP_ACL_MSG "use of %s denied for "
60837 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
60838 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
60839 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
60840 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
60841 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
60842 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
60843 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
60844 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
60845 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
60846 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
60847 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
60848 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
60849 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
60850 +#define GR_VM86_MSG "denied use of vm86 by "
60851 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
60852 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
60853 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
60854 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
60855 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
60856 new file mode 100644
60857 index 0000000..2ccf677
60858 --- /dev/null
60859 +++ b/include/linux/grsecurity.h
60860 @@ -0,0 +1,229 @@
60861 +#ifndef GR_SECURITY_H
60862 +#define GR_SECURITY_H
60863 +#include <linux/fs.h>
60864 +#include <linux/fs_struct.h>
60865 +#include <linux/binfmts.h>
60866 +#include <linux/gracl.h>
60867 +
60868 +/* notify of brain-dead configs */
60869 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60870 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
60871 +#endif
60872 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
60873 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
60874 +#endif
60875 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
60876 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
60877 +#endif
60878 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
60879 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
60880 +#endif
60881 +
60882 +#include <linux/compat.h>
60883 +
60884 +struct user_arg_ptr {
60885 +#ifdef CONFIG_COMPAT
60886 + bool is_compat;
60887 +#endif
60888 + union {
60889 + const char __user *const __user *native;
60890 +#ifdef CONFIG_COMPAT
60891 + compat_uptr_t __user *compat;
60892 +#endif
60893 + } ptr;
60894 +};
60895 +
60896 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
60897 +void gr_handle_brute_check(void);
60898 +void gr_handle_kernel_exploit(void);
60899 +int gr_process_user_ban(void);
60900 +
60901 +char gr_roletype_to_char(void);
60902 +
60903 +int gr_acl_enable_at_secure(void);
60904 +
60905 +int gr_check_user_change(int real, int effective, int fs);
60906 +int gr_check_group_change(int real, int effective, int fs);
60907 +
60908 +void gr_del_task_from_ip_table(struct task_struct *p);
60909 +
60910 +int gr_pid_is_chrooted(struct task_struct *p);
60911 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
60912 +int gr_handle_chroot_nice(void);
60913 +int gr_handle_chroot_sysctl(const int op);
60914 +int gr_handle_chroot_setpriority(struct task_struct *p,
60915 + const int niceval);
60916 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
60917 +int gr_handle_chroot_chroot(const struct dentry *dentry,
60918 + const struct vfsmount *mnt);
60919 +void gr_handle_chroot_chdir(struct path *path);
60920 +int gr_handle_chroot_chmod(const struct dentry *dentry,
60921 + const struct vfsmount *mnt, const int mode);
60922 +int gr_handle_chroot_mknod(const struct dentry *dentry,
60923 + const struct vfsmount *mnt, const int mode);
60924 +int gr_handle_chroot_mount(const struct dentry *dentry,
60925 + const struct vfsmount *mnt,
60926 + const char *dev_name);
60927 +int gr_handle_chroot_pivot(void);
60928 +int gr_handle_chroot_unix(const pid_t pid);
60929 +
60930 +int gr_handle_rawio(const struct inode *inode);
60931 +
60932 +void gr_handle_ioperm(void);
60933 +void gr_handle_iopl(void);
60934 +
60935 +umode_t gr_acl_umask(void);
60936 +
60937 +int gr_tpe_allow(const struct file *file);
60938 +
60939 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
60940 +void gr_clear_chroot_entries(struct task_struct *task);
60941 +
60942 +void gr_log_forkfail(const int retval);
60943 +void gr_log_timechange(void);
60944 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
60945 +void gr_log_chdir(const struct dentry *dentry,
60946 + const struct vfsmount *mnt);
60947 +void gr_log_chroot_exec(const struct dentry *dentry,
60948 + const struct vfsmount *mnt);
60949 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
60950 +void gr_log_remount(const char *devname, const int retval);
60951 +void gr_log_unmount(const char *devname, const int retval);
60952 +void gr_log_mount(const char *from, const char *to, const int retval);
60953 +void gr_log_textrel(struct vm_area_struct *vma);
60954 +void gr_log_rwxmmap(struct file *file);
60955 +void gr_log_rwxmprotect(struct file *file);
60956 +
60957 +int gr_handle_follow_link(const struct inode *parent,
60958 + const struct inode *inode,
60959 + const struct dentry *dentry,
60960 + const struct vfsmount *mnt);
60961 +int gr_handle_fifo(const struct dentry *dentry,
60962 + const struct vfsmount *mnt,
60963 + const struct dentry *dir, const int flag,
60964 + const int acc_mode);
60965 +int gr_handle_hardlink(const struct dentry *dentry,
60966 + const struct vfsmount *mnt,
60967 + struct inode *inode,
60968 + const int mode, const char *to);
60969 +
60970 +int gr_is_capable(const int cap);
60971 +int gr_is_capable_nolog(const int cap);
60972 +void gr_learn_resource(const struct task_struct *task, const int limit,
60973 + const unsigned long wanted, const int gt);
60974 +void gr_copy_label(struct task_struct *tsk);
60975 +void gr_handle_crash(struct task_struct *task, const int sig);
60976 +int gr_handle_signal(const struct task_struct *p, const int sig);
60977 +int gr_check_crash_uid(const uid_t uid);
60978 +int gr_check_protected_task(const struct task_struct *task);
60979 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
60980 +int gr_acl_handle_mmap(const struct file *file,
60981 + const unsigned long prot);
60982 +int gr_acl_handle_mprotect(const struct file *file,
60983 + const unsigned long prot);
60984 +int gr_check_hidden_task(const struct task_struct *tsk);
60985 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
60986 + const struct vfsmount *mnt);
60987 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
60988 + const struct vfsmount *mnt);
60989 +__u32 gr_acl_handle_access(const struct dentry *dentry,
60990 + const struct vfsmount *mnt, const int fmode);
60991 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
60992 + const struct vfsmount *mnt, umode_t *mode);
60993 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
60994 + const struct vfsmount *mnt);
60995 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
60996 + const struct vfsmount *mnt);
60997 +int gr_handle_ptrace(struct task_struct *task, const long request);
60998 +int gr_handle_proc_ptrace(struct task_struct *task);
60999 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61000 + const struct vfsmount *mnt);
61001 +int gr_check_crash_exec(const struct file *filp);
61002 +int gr_acl_is_enabled(void);
61003 +void gr_set_kernel_label(struct task_struct *task);
61004 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61005 + const gid_t gid);
61006 +int gr_set_proc_label(const struct dentry *dentry,
61007 + const struct vfsmount *mnt,
61008 + const int unsafe_flags);
61009 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61010 + const struct vfsmount *mnt);
61011 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61012 + const struct vfsmount *mnt, int acc_mode);
61013 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61014 + const struct dentry *p_dentry,
61015 + const struct vfsmount *p_mnt,
61016 + int open_flags, int acc_mode, const int imode);
61017 +void gr_handle_create(const struct dentry *dentry,
61018 + const struct vfsmount *mnt);
61019 +void gr_handle_proc_create(const struct dentry *dentry,
61020 + const struct inode *inode);
61021 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61022 + const struct dentry *parent_dentry,
61023 + const struct vfsmount *parent_mnt,
61024 + const int mode);
61025 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61026 + const struct dentry *parent_dentry,
61027 + const struct vfsmount *parent_mnt);
61028 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61029 + const struct vfsmount *mnt);
61030 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61031 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61032 + const struct vfsmount *mnt);
61033 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61034 + const struct dentry *parent_dentry,
61035 + const struct vfsmount *parent_mnt,
61036 + const char *from);
61037 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61038 + const struct dentry *parent_dentry,
61039 + const struct vfsmount *parent_mnt,
61040 + const struct dentry *old_dentry,
61041 + const struct vfsmount *old_mnt, const char *to);
61042 +int gr_acl_handle_rename(struct dentry *new_dentry,
61043 + struct dentry *parent_dentry,
61044 + const struct vfsmount *parent_mnt,
61045 + struct dentry *old_dentry,
61046 + struct inode *old_parent_inode,
61047 + struct vfsmount *old_mnt, const char *newname);
61048 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61049 + struct dentry *old_dentry,
61050 + struct dentry *new_dentry,
61051 + struct vfsmount *mnt, const __u8 replace);
61052 +__u32 gr_check_link(const struct dentry *new_dentry,
61053 + const struct dentry *parent_dentry,
61054 + const struct vfsmount *parent_mnt,
61055 + const struct dentry *old_dentry,
61056 + const struct vfsmount *old_mnt);
61057 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61058 + const unsigned int namelen, const ino_t ino);
61059 +
61060 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61061 + const struct vfsmount *mnt);
61062 +void gr_acl_handle_exit(void);
61063 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61064 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61065 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61066 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61067 +void gr_audit_ptrace(struct task_struct *task);
61068 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61069 +
61070 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61071 +
61072 +#ifdef CONFIG_GRKERNSEC
61073 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61074 +void gr_handle_vm86(void);
61075 +void gr_handle_mem_readwrite(u64 from, u64 to);
61076 +
61077 +void gr_log_badprocpid(const char *entry);
61078 +
61079 +extern int grsec_enable_dmesg;
61080 +extern int grsec_disable_privio;
61081 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61082 +extern int grsec_enable_chroot_findtask;
61083 +#endif
61084 +#ifdef CONFIG_GRKERNSEC_SETXID
61085 +extern int grsec_enable_setxid;
61086 +#endif
61087 +#endif
61088 +
61089 +#endif
61090 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61091 new file mode 100644
61092 index 0000000..e7ffaaf
61093 --- /dev/null
61094 +++ b/include/linux/grsock.h
61095 @@ -0,0 +1,19 @@
61096 +#ifndef __GRSOCK_H
61097 +#define __GRSOCK_H
61098 +
61099 +extern void gr_attach_curr_ip(const struct sock *sk);
61100 +extern int gr_handle_sock_all(const int family, const int type,
61101 + const int protocol);
61102 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61103 +extern int gr_handle_sock_server_other(const struct sock *sck);
61104 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61105 +extern int gr_search_connect(struct socket * sock,
61106 + struct sockaddr_in * addr);
61107 +extern int gr_search_bind(struct socket * sock,
61108 + struct sockaddr_in * addr);
61109 +extern int gr_search_listen(struct socket * sock);
61110 +extern int gr_search_accept(struct socket * sock);
61111 +extern int gr_search_socket(const int domain, const int type,
61112 + const int protocol);
61113 +
61114 +#endif
61115 diff --git a/include/linux/hid.h b/include/linux/hid.h
61116 index c235e4e..f0cf7a0 100644
61117 --- a/include/linux/hid.h
61118 +++ b/include/linux/hid.h
61119 @@ -679,7 +679,7 @@ struct hid_ll_driver {
61120 unsigned int code, int value);
61121
61122 int (*parse)(struct hid_device *hdev);
61123 -};
61124 +} __no_const;
61125
61126 #define PM_HINT_FULLON 1<<5
61127 #define PM_HINT_NORMAL 1<<1
61128 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61129 index 3a93f73..b19d0b3 100644
61130 --- a/include/linux/highmem.h
61131 +++ b/include/linux/highmem.h
61132 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
61133 kunmap_atomic(kaddr, KM_USER0);
61134 }
61135
61136 +static inline void sanitize_highpage(struct page *page)
61137 +{
61138 + void *kaddr;
61139 + unsigned long flags;
61140 +
61141 + local_irq_save(flags);
61142 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
61143 + clear_page(kaddr);
61144 + kunmap_atomic(kaddr, KM_CLEARPAGE);
61145 + local_irq_restore(flags);
61146 +}
61147 +
61148 static inline void zero_user_segments(struct page *page,
61149 unsigned start1, unsigned end1,
61150 unsigned start2, unsigned end2)
61151 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61152 index 07d103a..04ec65b 100644
61153 --- a/include/linux/i2c.h
61154 +++ b/include/linux/i2c.h
61155 @@ -364,6 +364,7 @@ struct i2c_algorithm {
61156 /* To determine what the adapter supports */
61157 u32 (*functionality) (struct i2c_adapter *);
61158 };
61159 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61160
61161 /*
61162 * i2c_adapter is the structure used to identify a physical i2c bus along
61163 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61164 index a6deef4..c56a7f2 100644
61165 --- a/include/linux/i2o.h
61166 +++ b/include/linux/i2o.h
61167 @@ -564,7 +564,7 @@ struct i2o_controller {
61168 struct i2o_device *exec; /* Executive */
61169 #if BITS_PER_LONG == 64
61170 spinlock_t context_list_lock; /* lock for context_list */
61171 - atomic_t context_list_counter; /* needed for unique contexts */
61172 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61173 struct list_head context_list; /* list of context id's
61174 and pointers */
61175 #endif
61176 diff --git a/include/linux/init.h b/include/linux/init.h
61177 index 9146f39..885354d 100644
61178 --- a/include/linux/init.h
61179 +++ b/include/linux/init.h
61180 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
61181
61182 /* Each module must use one module_init(). */
61183 #define module_init(initfn) \
61184 - static inline initcall_t __inittest(void) \
61185 + static inline __used initcall_t __inittest(void) \
61186 { return initfn; } \
61187 int init_module(void) __attribute__((alias(#initfn)));
61188
61189 /* This is only required if you want to be unloadable. */
61190 #define module_exit(exitfn) \
61191 - static inline exitcall_t __exittest(void) \
61192 + static inline __used exitcall_t __exittest(void) \
61193 { return exitfn; } \
61194 void cleanup_module(void) __attribute__((alias(#exitfn)));
61195
61196 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61197 index 32574ee..00d4ef1 100644
61198 --- a/include/linux/init_task.h
61199 +++ b/include/linux/init_task.h
61200 @@ -128,6 +128,12 @@ extern struct cred init_cred;
61201
61202 #define INIT_TASK_COMM "swapper"
61203
61204 +#ifdef CONFIG_X86
61205 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61206 +#else
61207 +#define INIT_TASK_THREAD_INFO
61208 +#endif
61209 +
61210 /*
61211 * INIT_TASK is used to set up the first task table, touch at
61212 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61213 @@ -166,6 +172,7 @@ extern struct cred init_cred;
61214 RCU_INIT_POINTER(.cred, &init_cred), \
61215 .comm = INIT_TASK_COMM, \
61216 .thread = INIT_THREAD, \
61217 + INIT_TASK_THREAD_INFO \
61218 .fs = &init_fs, \
61219 .files = &init_files, \
61220 .signal = &init_signals, \
61221 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61222 index e6ca56d..8583707 100644
61223 --- a/include/linux/intel-iommu.h
61224 +++ b/include/linux/intel-iommu.h
61225 @@ -296,7 +296,7 @@ struct iommu_flush {
61226 u8 fm, u64 type);
61227 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61228 unsigned int size_order, u64 type);
61229 -};
61230 +} __no_const;
61231
61232 enum {
61233 SR_DMAR_FECTL_REG,
61234 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61235 index a64b00e..464d8bc 100644
61236 --- a/include/linux/interrupt.h
61237 +++ b/include/linux/interrupt.h
61238 @@ -441,7 +441,7 @@ enum
61239 /* map softirq index to softirq name. update 'softirq_to_name' in
61240 * kernel/softirq.c when adding a new softirq.
61241 */
61242 -extern char *softirq_to_name[NR_SOFTIRQS];
61243 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61244
61245 /* softirq mask and active fields moved to irq_cpustat_t in
61246 * asm/hardirq.h to get better cache usage. KAO
61247 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61248
61249 struct softirq_action
61250 {
61251 - void (*action)(struct softirq_action *);
61252 + void (*action)(void);
61253 };
61254
61255 asmlinkage void do_softirq(void);
61256 asmlinkage void __do_softirq(void);
61257 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61258 +extern void open_softirq(int nr, void (*action)(void));
61259 extern void softirq_init(void);
61260 static inline void __raise_softirq_irqoff(unsigned int nr)
61261 {
61262 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61263 index 3875719..4cd454c 100644
61264 --- a/include/linux/kallsyms.h
61265 +++ b/include/linux/kallsyms.h
61266 @@ -15,7 +15,8 @@
61267
61268 struct module;
61269
61270 -#ifdef CONFIG_KALLSYMS
61271 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61272 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61273 /* Lookup the address for a symbol. Returns 0 if not found. */
61274 unsigned long kallsyms_lookup_name(const char *name);
61275
61276 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61277 /* Stupid that this does nothing, but I didn't create this mess. */
61278 #define __print_symbol(fmt, addr)
61279 #endif /*CONFIG_KALLSYMS*/
61280 +#else /* when included by kallsyms.c, vsnprintf.c, or
61281 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61282 +extern void __print_symbol(const char *fmt, unsigned long address);
61283 +extern int sprint_backtrace(char *buffer, unsigned long address);
61284 +extern int sprint_symbol(char *buffer, unsigned long address);
61285 +const char *kallsyms_lookup(unsigned long addr,
61286 + unsigned long *symbolsize,
61287 + unsigned long *offset,
61288 + char **modname, char *namebuf);
61289 +#endif
61290
61291 /* This macro allows us to keep printk typechecking */
61292 static __printf(1, 2)
61293 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61294 index fa39183..40160be 100644
61295 --- a/include/linux/kgdb.h
61296 +++ b/include/linux/kgdb.h
61297 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61298 extern int kgdb_io_module_registered;
61299
61300 extern atomic_t kgdb_setting_breakpoint;
61301 -extern atomic_t kgdb_cpu_doing_single_step;
61302 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61303
61304 extern struct task_struct *kgdb_usethread;
61305 extern struct task_struct *kgdb_contthread;
61306 @@ -251,7 +251,7 @@ struct kgdb_arch {
61307 void (*disable_hw_break)(struct pt_regs *regs);
61308 void (*remove_all_hw_break)(void);
61309 void (*correct_hw_break)(void);
61310 -};
61311 +} __do_const;
61312
61313 /**
61314 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61315 @@ -276,7 +276,7 @@ struct kgdb_io {
61316 void (*pre_exception) (void);
61317 void (*post_exception) (void);
61318 int is_console;
61319 -};
61320 +} __do_const;
61321
61322 extern struct kgdb_arch arch_kgdb_ops;
61323
61324 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61325 index b16f653..eb908f4 100644
61326 --- a/include/linux/kmod.h
61327 +++ b/include/linux/kmod.h
61328 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61329 * usually useless though. */
61330 extern __printf(2, 3)
61331 int __request_module(bool wait, const char *name, ...);
61332 +extern __printf(3, 4)
61333 +int ___request_module(bool wait, char *param_name, const char *name, ...);
61334 #define request_module(mod...) __request_module(true, mod)
61335 #define request_module_nowait(mod...) __request_module(false, mod)
61336 #define try_then_request_module(x, mod...) \
61337 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61338 index d526231..086e89b 100644
61339 --- a/include/linux/kvm_host.h
61340 +++ b/include/linux/kvm_host.h
61341 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61342 void vcpu_load(struct kvm_vcpu *vcpu);
61343 void vcpu_put(struct kvm_vcpu *vcpu);
61344
61345 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61346 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61347 struct module *module);
61348 void kvm_exit(void);
61349
61350 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61351 struct kvm_guest_debug *dbg);
61352 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61353
61354 -int kvm_arch_init(void *opaque);
61355 +int kvm_arch_init(const void *opaque);
61356 void kvm_arch_exit(void);
61357
61358 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61359 diff --git a/include/linux/libata.h b/include/linux/libata.h
61360 index cafc09a..d7e7829 100644
61361 --- a/include/linux/libata.h
61362 +++ b/include/linux/libata.h
61363 @@ -909,7 +909,7 @@ struct ata_port_operations {
61364 * fields must be pointers.
61365 */
61366 const struct ata_port_operations *inherits;
61367 -};
61368 +} __do_const;
61369
61370 struct ata_port_info {
61371 unsigned long flags;
61372 diff --git a/include/linux/mca.h b/include/linux/mca.h
61373 index 3797270..7765ede 100644
61374 --- a/include/linux/mca.h
61375 +++ b/include/linux/mca.h
61376 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61377 int region);
61378 void * (*mca_transform_memory)(struct mca_device *,
61379 void *memory);
61380 -};
61381 +} __no_const;
61382
61383 struct mca_bus {
61384 u64 default_dma_mask;
61385 diff --git a/include/linux/memory.h b/include/linux/memory.h
61386 index 935699b..11042cc 100644
61387 --- a/include/linux/memory.h
61388 +++ b/include/linux/memory.h
61389 @@ -144,7 +144,7 @@ struct memory_accessor {
61390 size_t count);
61391 ssize_t (*write)(struct memory_accessor *, const char *buf,
61392 off_t offset, size_t count);
61393 -};
61394 +} __no_const;
61395
61396 /*
61397 * Kernel text modification mutex, used for code patching. Users of this lock
61398 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61399 index 9970337..9444122 100644
61400 --- a/include/linux/mfd/abx500.h
61401 +++ b/include/linux/mfd/abx500.h
61402 @@ -188,6 +188,7 @@ struct abx500_ops {
61403 int (*event_registers_startup_state_get) (struct device *, u8 *);
61404 int (*startup_irq_enabled) (struct device *, unsigned int);
61405 };
61406 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61407
61408 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61409 void abx500_remove_ops(struct device *dev);
61410 diff --git a/include/linux/mm.h b/include/linux/mm.h
61411 index 4baadd1..2e0b45e 100644
61412 --- a/include/linux/mm.h
61413 +++ b/include/linux/mm.h
61414 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
61415
61416 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61417 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61418 +
61419 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61420 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61421 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61422 +#else
61423 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61424 +#endif
61425 +
61426 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61427 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61428
61429 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
61430 int set_page_dirty_lock(struct page *page);
61431 int clear_page_dirty_for_io(struct page *page);
61432
61433 -/* Is the vma a continuation of the stack vma above it? */
61434 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61435 -{
61436 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61437 -}
61438 -
61439 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61440 - unsigned long addr)
61441 -{
61442 - return (vma->vm_flags & VM_GROWSDOWN) &&
61443 - (vma->vm_start == addr) &&
61444 - !vma_growsdown(vma->vm_prev, addr);
61445 -}
61446 -
61447 -/* Is the vma a continuation of the stack vma below it? */
61448 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61449 -{
61450 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61451 -}
61452 -
61453 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61454 - unsigned long addr)
61455 -{
61456 - return (vma->vm_flags & VM_GROWSUP) &&
61457 - (vma->vm_end == addr) &&
61458 - !vma_growsup(vma->vm_next, addr);
61459 -}
61460 -
61461 extern unsigned long move_page_tables(struct vm_area_struct *vma,
61462 unsigned long old_addr, struct vm_area_struct *new_vma,
61463 unsigned long new_addr, unsigned long len);
61464 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
61465 }
61466 #endif
61467
61468 +#ifdef CONFIG_MMU
61469 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61470 +#else
61471 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61472 +{
61473 + return __pgprot(0);
61474 +}
61475 +#endif
61476 +
61477 int vma_wants_writenotify(struct vm_area_struct *vma);
61478
61479 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61480 @@ -1419,6 +1407,7 @@ out:
61481 }
61482
61483 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61484 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61485
61486 extern unsigned long do_brk(unsigned long, unsigned long);
61487
61488 @@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61489 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61490 struct vm_area_struct **pprev);
61491
61492 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61493 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61494 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61495 +
61496 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61497 NULL if none. Assume start_addr < end_addr. */
61498 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61499 @@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
61500 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
61501 }
61502
61503 -#ifdef CONFIG_MMU
61504 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
61505 -#else
61506 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61507 -{
61508 - return __pgprot(0);
61509 -}
61510 -#endif
61511 -
61512 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61513 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61514 unsigned long pfn, unsigned long size, pgprot_t);
61515 @@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
61516 extern int sysctl_memory_failure_early_kill;
61517 extern int sysctl_memory_failure_recovery;
61518 extern void shake_page(struct page *p, int access);
61519 -extern atomic_long_t mce_bad_pages;
61520 +extern atomic_long_unchecked_t mce_bad_pages;
61521 extern int soft_offline_page(struct page *page, int flags);
61522
61523 extern void dump_page(struct page *page);
61524 @@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
61525 unsigned int pages_per_huge_page);
61526 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
61527
61528 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61529 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61530 +#else
61531 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61532 +#endif
61533 +
61534 #endif /* __KERNEL__ */
61535 #endif /* _LINUX_MM_H */
61536 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
61537 index 5b42f1b..759e4b4 100644
61538 --- a/include/linux/mm_types.h
61539 +++ b/include/linux/mm_types.h
61540 @@ -253,6 +253,8 @@ struct vm_area_struct {
61541 #ifdef CONFIG_NUMA
61542 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
61543 #endif
61544 +
61545 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
61546 };
61547
61548 struct core_thread {
61549 @@ -389,6 +391,24 @@ struct mm_struct {
61550 #ifdef CONFIG_CPUMASK_OFFSTACK
61551 struct cpumask cpumask_allocation;
61552 #endif
61553 +
61554 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61555 + unsigned long pax_flags;
61556 +#endif
61557 +
61558 +#ifdef CONFIG_PAX_DLRESOLVE
61559 + unsigned long call_dl_resolve;
61560 +#endif
61561 +
61562 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
61563 + unsigned long call_syscall;
61564 +#endif
61565 +
61566 +#ifdef CONFIG_PAX_ASLR
61567 + unsigned long delta_mmap; /* randomized offset */
61568 + unsigned long delta_stack; /* randomized offset */
61569 +#endif
61570 +
61571 };
61572
61573 static inline void mm_init_cpumask(struct mm_struct *mm)
61574 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
61575 index 1d1b1e1..2a13c78 100644
61576 --- a/include/linux/mmu_notifier.h
61577 +++ b/include/linux/mmu_notifier.h
61578 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
61579 */
61580 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
61581 ({ \
61582 - pte_t __pte; \
61583 + pte_t ___pte; \
61584 struct vm_area_struct *___vma = __vma; \
61585 unsigned long ___address = __address; \
61586 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
61587 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
61588 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
61589 - __pte; \
61590 + ___pte; \
61591 })
61592
61593 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
61594 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
61595 index 188cb2f..d78409b 100644
61596 --- a/include/linux/mmzone.h
61597 +++ b/include/linux/mmzone.h
61598 @@ -369,7 +369,7 @@ struct zone {
61599 unsigned long flags; /* zone flags, see below */
61600
61601 /* Zone statistics */
61602 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61603 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61604
61605 /*
61606 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
61607 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
61608 index 468819c..17b9db3 100644
61609 --- a/include/linux/mod_devicetable.h
61610 +++ b/include/linux/mod_devicetable.h
61611 @@ -12,7 +12,7 @@
61612 typedef unsigned long kernel_ulong_t;
61613 #endif
61614
61615 -#define PCI_ANY_ID (~0)
61616 +#define PCI_ANY_ID ((__u16)~0)
61617
61618 struct pci_device_id {
61619 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
61620 @@ -131,7 +131,7 @@ struct usb_device_id {
61621 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
61622 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
61623
61624 -#define HID_ANY_ID (~0)
61625 +#define HID_ANY_ID (~0U)
61626
61627 struct hid_device_id {
61628 __u16 bus;
61629 diff --git a/include/linux/module.h b/include/linux/module.h
61630 index 3cb7839..511cb87 100644
61631 --- a/include/linux/module.h
61632 +++ b/include/linux/module.h
61633 @@ -17,6 +17,7 @@
61634 #include <linux/moduleparam.h>
61635 #include <linux/tracepoint.h>
61636 #include <linux/export.h>
61637 +#include <linux/fs.h>
61638
61639 #include <linux/percpu.h>
61640 #include <asm/module.h>
61641 @@ -261,19 +262,16 @@ struct module
61642 int (*init)(void);
61643
61644 /* If this is non-NULL, vfree after init() returns */
61645 - void *module_init;
61646 + void *module_init_rx, *module_init_rw;
61647
61648 /* Here is the actual code + data, vfree'd on unload. */
61649 - void *module_core;
61650 + void *module_core_rx, *module_core_rw;
61651
61652 /* Here are the sizes of the init and core sections */
61653 - unsigned int init_size, core_size;
61654 + unsigned int init_size_rw, core_size_rw;
61655
61656 /* The size of the executable code in each section. */
61657 - unsigned int init_text_size, core_text_size;
61658 -
61659 - /* Size of RO sections of the module (text+rodata) */
61660 - unsigned int init_ro_size, core_ro_size;
61661 + unsigned int init_size_rx, core_size_rx;
61662
61663 /* Arch-specific module values */
61664 struct mod_arch_specific arch;
61665 @@ -329,6 +327,10 @@ struct module
61666 #ifdef CONFIG_EVENT_TRACING
61667 struct ftrace_event_call **trace_events;
61668 unsigned int num_trace_events;
61669 + struct file_operations trace_id;
61670 + struct file_operations trace_enable;
61671 + struct file_operations trace_format;
61672 + struct file_operations trace_filter;
61673 #endif
61674 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
61675 unsigned int num_ftrace_callsites;
61676 @@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
61677 bool is_module_percpu_address(unsigned long addr);
61678 bool is_module_text_address(unsigned long addr);
61679
61680 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
61681 +{
61682 +
61683 +#ifdef CONFIG_PAX_KERNEXEC
61684 + if (ktla_ktva(addr) >= (unsigned long)start &&
61685 + ktla_ktva(addr) < (unsigned long)start + size)
61686 + return 1;
61687 +#endif
61688 +
61689 + return ((void *)addr >= start && (void *)addr < start + size);
61690 +}
61691 +
61692 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
61693 +{
61694 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
61695 +}
61696 +
61697 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
61698 +{
61699 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
61700 +}
61701 +
61702 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
61703 +{
61704 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
61705 +}
61706 +
61707 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
61708 +{
61709 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
61710 +}
61711 +
61712 static inline int within_module_core(unsigned long addr, struct module *mod)
61713 {
61714 - return (unsigned long)mod->module_core <= addr &&
61715 - addr < (unsigned long)mod->module_core + mod->core_size;
61716 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
61717 }
61718
61719 static inline int within_module_init(unsigned long addr, struct module *mod)
61720 {
61721 - return (unsigned long)mod->module_init <= addr &&
61722 - addr < (unsigned long)mod->module_init + mod->init_size;
61723 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
61724 }
61725
61726 /* Search for module by name: must hold module_mutex. */
61727 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
61728 index b2be02e..6a9fdb1 100644
61729 --- a/include/linux/moduleloader.h
61730 +++ b/include/linux/moduleloader.h
61731 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
61732 sections. Returns NULL on failure. */
61733 void *module_alloc(unsigned long size);
61734
61735 +#ifdef CONFIG_PAX_KERNEXEC
61736 +void *module_alloc_exec(unsigned long size);
61737 +#else
61738 +#define module_alloc_exec(x) module_alloc(x)
61739 +#endif
61740 +
61741 /* Free memory returned from module_alloc. */
61742 void module_free(struct module *mod, void *module_region);
61743
61744 +#ifdef CONFIG_PAX_KERNEXEC
61745 +void module_free_exec(struct module *mod, void *module_region);
61746 +#else
61747 +#define module_free_exec(x, y) module_free((x), (y))
61748 +#endif
61749 +
61750 /* Apply the given relocation to the (simplified) ELF. Return -error
61751 or 0. */
61752 int apply_relocate(Elf_Shdr *sechdrs,
61753 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
61754 index 7939f63..ec6df57 100644
61755 --- a/include/linux/moduleparam.h
61756 +++ b/include/linux/moduleparam.h
61757 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
61758 * @len is usually just sizeof(string).
61759 */
61760 #define module_param_string(name, string, len, perm) \
61761 - static const struct kparam_string __param_string_##name \
61762 + static const struct kparam_string __param_string_##name __used \
61763 = { len, string }; \
61764 __module_param_call(MODULE_PARAM_PREFIX, name, \
61765 &param_ops_string, \
61766 @@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
61767 * module_param_named() for why this might be necessary.
61768 */
61769 #define module_param_array_named(name, array, type, nump, perm) \
61770 - static const struct kparam_array __param_arr_##name \
61771 + static const struct kparam_array __param_arr_##name __used \
61772 = { .max = ARRAY_SIZE(array), .num = nump, \
61773 .ops = &param_ops_##type, \
61774 .elemsize = sizeof(array[0]), .elem = array }; \
61775 diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
61776 index a9e6ba4..0f9e29b 100644
61777 --- a/include/linux/mtd/map.h
61778 +++ b/include/linux/mtd/map.h
61779 @@ -25,6 +25,7 @@
61780 #include <linux/types.h>
61781 #include <linux/list.h>
61782 #include <linux/string.h>
61783 +#include <linux/kernel.h>
61784 #include <linux/bug.h>
61785
61786
61787 diff --git a/include/linux/namei.h b/include/linux/namei.h
61788 index ffc0213..2c1f2cb 100644
61789 --- a/include/linux/namei.h
61790 +++ b/include/linux/namei.h
61791 @@ -24,7 +24,7 @@ struct nameidata {
61792 unsigned seq;
61793 int last_type;
61794 unsigned depth;
61795 - char *saved_names[MAX_NESTED_LINKS + 1];
61796 + const char *saved_names[MAX_NESTED_LINKS + 1];
61797
61798 /* Intent data */
61799 union {
61800 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
61801 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
61802 extern void unlock_rename(struct dentry *, struct dentry *);
61803
61804 -static inline void nd_set_link(struct nameidata *nd, char *path)
61805 +static inline void nd_set_link(struct nameidata *nd, const char *path)
61806 {
61807 nd->saved_names[nd->depth] = path;
61808 }
61809
61810 -static inline char *nd_get_link(struct nameidata *nd)
61811 +static inline const char *nd_get_link(const struct nameidata *nd)
61812 {
61813 return nd->saved_names[nd->depth];
61814 }
61815 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
61816 index a82ad4d..90d15b7 100644
61817 --- a/include/linux/netdevice.h
61818 +++ b/include/linux/netdevice.h
61819 @@ -949,6 +949,7 @@ struct net_device_ops {
61820 int (*ndo_set_features)(struct net_device *dev,
61821 u32 features);
61822 };
61823 +typedef struct net_device_ops __no_const net_device_ops_no_const;
61824
61825 /*
61826 * The DEVICE structure.
61827 @@ -1088,7 +1089,7 @@ struct net_device {
61828 int iflink;
61829
61830 struct net_device_stats stats;
61831 - atomic_long_t rx_dropped; /* dropped packets by core network
61832 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
61833 * Do not use this in drivers.
61834 */
61835
61836 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
61837 new file mode 100644
61838 index 0000000..33f4af8
61839 --- /dev/null
61840 +++ b/include/linux/netfilter/xt_gradm.h
61841 @@ -0,0 +1,9 @@
61842 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
61843 +#define _LINUX_NETFILTER_XT_GRADM_H 1
61844 +
61845 +struct xt_gradm_mtinfo {
61846 + __u16 flags;
61847 + __u16 invflags;
61848 +};
61849 +
61850 +#endif
61851 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
61852 index c65a18a..0c05f3a 100644
61853 --- a/include/linux/of_pdt.h
61854 +++ b/include/linux/of_pdt.h
61855 @@ -32,7 +32,7 @@ struct of_pdt_ops {
61856
61857 /* return 0 on success; fill in 'len' with number of bytes in path */
61858 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
61859 -};
61860 +} __no_const;
61861
61862 extern void *prom_early_alloc(unsigned long size);
61863
61864 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
61865 index a4c5624..79d6d88 100644
61866 --- a/include/linux/oprofile.h
61867 +++ b/include/linux/oprofile.h
61868 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
61869 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
61870 char const * name, ulong * val);
61871
61872 -/** Create a file for read-only access to an atomic_t. */
61873 +/** Create a file for read-only access to an atomic_unchecked_t. */
61874 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
61875 - char const * name, atomic_t * val);
61876 + char const * name, atomic_unchecked_t * val);
61877
61878 /** create a directory */
61879 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
61880 diff --git a/include/linux/padata.h b/include/linux/padata.h
61881 index 4633b2f..988bc08 100644
61882 --- a/include/linux/padata.h
61883 +++ b/include/linux/padata.h
61884 @@ -129,7 +129,7 @@ struct parallel_data {
61885 struct padata_instance *pinst;
61886 struct padata_parallel_queue __percpu *pqueue;
61887 struct padata_serial_queue __percpu *squeue;
61888 - atomic_t seq_nr;
61889 + atomic_unchecked_t seq_nr;
61890 atomic_t reorder_objects;
61891 atomic_t refcnt;
61892 unsigned int max_seq_nr;
61893 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
61894 index b1f8912..c955bff 100644
61895 --- a/include/linux/perf_event.h
61896 +++ b/include/linux/perf_event.h
61897 @@ -748,8 +748,8 @@ struct perf_event {
61898
61899 enum perf_event_active_state state;
61900 unsigned int attach_state;
61901 - local64_t count;
61902 - atomic64_t child_count;
61903 + local64_t count; /* PaX: fix it one day */
61904 + atomic64_unchecked_t child_count;
61905
61906 /*
61907 * These are the total time in nanoseconds that the event
61908 @@ -800,8 +800,8 @@ struct perf_event {
61909 * These accumulate total time (in nanoseconds) that children
61910 * events have been enabled and running, respectively.
61911 */
61912 - atomic64_t child_total_time_enabled;
61913 - atomic64_t child_total_time_running;
61914 + atomic64_unchecked_t child_total_time_enabled;
61915 + atomic64_unchecked_t child_total_time_running;
61916
61917 /*
61918 * Protect attach/detach and child_list:
61919 diff --git a/include/linux/personality.h b/include/linux/personality.h
61920 index 8fc7dd1a..c19d89e 100644
61921 --- a/include/linux/personality.h
61922 +++ b/include/linux/personality.h
61923 @@ -44,6 +44,7 @@ enum {
61924 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
61925 ADDR_NO_RANDOMIZE | \
61926 ADDR_COMPAT_LAYOUT | \
61927 + ADDR_LIMIT_3GB | \
61928 MMAP_PAGE_ZERO)
61929
61930 /*
61931 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
61932 index 77257c9..51d473a 100644
61933 --- a/include/linux/pipe_fs_i.h
61934 +++ b/include/linux/pipe_fs_i.h
61935 @@ -46,9 +46,9 @@ struct pipe_buffer {
61936 struct pipe_inode_info {
61937 wait_queue_head_t wait;
61938 unsigned int nrbufs, curbuf, buffers;
61939 - unsigned int readers;
61940 - unsigned int writers;
61941 - unsigned int waiting_writers;
61942 + atomic_t readers;
61943 + atomic_t writers;
61944 + atomic_t waiting_writers;
61945 unsigned int r_counter;
61946 unsigned int w_counter;
61947 struct page *tmp_page;
61948 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
61949 index d3085e7..fd01052 100644
61950 --- a/include/linux/pm_runtime.h
61951 +++ b/include/linux/pm_runtime.h
61952 @@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
61953
61954 static inline void pm_runtime_mark_last_busy(struct device *dev)
61955 {
61956 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
61957 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
61958 }
61959
61960 #else /* !CONFIG_PM_RUNTIME */
61961 diff --git a/include/linux/poison.h b/include/linux/poison.h
61962 index 79159de..f1233a9 100644
61963 --- a/include/linux/poison.h
61964 +++ b/include/linux/poison.h
61965 @@ -19,8 +19,8 @@
61966 * under normal circumstances, used to verify that nobody uses
61967 * non-initialized list entries.
61968 */
61969 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
61970 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
61971 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
61972 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
61973
61974 /********** include/linux/timer.h **********/
61975 /*
61976 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
61977 index 58969b2..ead129b 100644
61978 --- a/include/linux/preempt.h
61979 +++ b/include/linux/preempt.h
61980 @@ -123,7 +123,7 @@ struct preempt_ops {
61981 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
61982 void (*sched_out)(struct preempt_notifier *notifier,
61983 struct task_struct *next);
61984 -};
61985 +} __no_const;
61986
61987 /**
61988 * preempt_notifier - key for installing preemption notifiers
61989 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
61990 index 643b96c..ef55a9c 100644
61991 --- a/include/linux/proc_fs.h
61992 +++ b/include/linux/proc_fs.h
61993 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
61994 return proc_create_data(name, mode, parent, proc_fops, NULL);
61995 }
61996
61997 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
61998 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
61999 +{
62000 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62001 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62002 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62003 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62004 +#else
62005 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62006 +#endif
62007 +}
62008 +
62009 +
62010 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62011 mode_t mode, struct proc_dir_entry *base,
62012 read_proc_t *read_proc, void * data)
62013 @@ -258,7 +271,7 @@ union proc_op {
62014 int (*proc_show)(struct seq_file *m,
62015 struct pid_namespace *ns, struct pid *pid,
62016 struct task_struct *task);
62017 -};
62018 +} __no_const;
62019
62020 struct ctl_table_header;
62021 struct ctl_table;
62022 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
62023 index 800f113..e9ee2e3 100644
62024 --- a/include/linux/ptrace.h
62025 +++ b/include/linux/ptrace.h
62026 @@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
62027 extern void exit_ptrace(struct task_struct *tracer);
62028 #define PTRACE_MODE_READ 1
62029 #define PTRACE_MODE_ATTACH 2
62030 -/* Returns 0 on success, -errno on denial. */
62031 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
62032 /* Returns true on success, false on denial. */
62033 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
62034 +/* Returns true on success, false on denial. */
62035 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
62036 +/* Returns true on success, false on denial. */
62037 +extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
62038
62039 static inline int ptrace_reparented(struct task_struct *child)
62040 {
62041 diff --git a/include/linux/random.h b/include/linux/random.h
62042 index 8f74538..02a1012 100644
62043 --- a/include/linux/random.h
62044 +++ b/include/linux/random.h
62045 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62046
62047 u32 prandom32(struct rnd_state *);
62048
62049 +static inline unsigned long pax_get_random_long(void)
62050 +{
62051 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62052 +}
62053 +
62054 /*
62055 * Handle minimum values for seeds
62056 */
62057 static inline u32 __seed(u32 x, u32 m)
62058 {
62059 - return (x < m) ? x + m : x;
62060 + return (x <= m) ? x + m + 1 : x;
62061 }
62062
62063 /**
62064 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62065 index e0879a7..a12f962 100644
62066 --- a/include/linux/reboot.h
62067 +++ b/include/linux/reboot.h
62068 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62069 * Architecture-specific implementations of sys_reboot commands.
62070 */
62071
62072 -extern void machine_restart(char *cmd);
62073 -extern void machine_halt(void);
62074 -extern void machine_power_off(void);
62075 +extern void machine_restart(char *cmd) __noreturn;
62076 +extern void machine_halt(void) __noreturn;
62077 +extern void machine_power_off(void) __noreturn;
62078
62079 extern void machine_shutdown(void);
62080 struct pt_regs;
62081 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62082 */
62083
62084 extern void kernel_restart_prepare(char *cmd);
62085 -extern void kernel_restart(char *cmd);
62086 -extern void kernel_halt(void);
62087 -extern void kernel_power_off(void);
62088 +extern void kernel_restart(char *cmd) __noreturn;
62089 +extern void kernel_halt(void) __noreturn;
62090 +extern void kernel_power_off(void) __noreturn;
62091
62092 extern int C_A_D; /* for sysctl */
62093 void ctrl_alt_del(void);
62094 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62095 * Emergency restart, callable from an interrupt handler.
62096 */
62097
62098 -extern void emergency_restart(void);
62099 +extern void emergency_restart(void) __noreturn;
62100 #include <asm/emergency-restart.h>
62101
62102 #endif
62103 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
62104 index 96d465f..b084e05 100644
62105 --- a/include/linux/reiserfs_fs.h
62106 +++ b/include/linux/reiserfs_fs.h
62107 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
62108 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
62109
62110 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
62111 -#define get_generation(s) atomic_read (&fs_generation(s))
62112 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
62113 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
62114 #define __fs_changed(gen,s) (gen != get_generation (s))
62115 #define fs_changed(gen,s) \
62116 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
62117 index 52c83b6..18ed7eb 100644
62118 --- a/include/linux/reiserfs_fs_sb.h
62119 +++ b/include/linux/reiserfs_fs_sb.h
62120 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
62121 /* Comment? -Hans */
62122 wait_queue_head_t s_wait;
62123 /* To be obsoleted soon by per buffer seals.. -Hans */
62124 - atomic_t s_generation_counter; // increased by one every time the
62125 + atomic_unchecked_t s_generation_counter; // increased by one every time the
62126 // tree gets re-balanced
62127 unsigned long s_properties; /* File system properties. Currently holds
62128 on-disk FS format */
62129 diff --git a/include/linux/relay.h b/include/linux/relay.h
62130 index 14a86bc..17d0700 100644
62131 --- a/include/linux/relay.h
62132 +++ b/include/linux/relay.h
62133 @@ -159,7 +159,7 @@ struct rchan_callbacks
62134 * The callback should return 0 if successful, negative if not.
62135 */
62136 int (*remove_buf_file)(struct dentry *dentry);
62137 -};
62138 +} __no_const;
62139
62140 /*
62141 * CONFIG_RELAY kernel API, kernel/relay.c
62142 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62143 index c6c6084..5bf1212 100644
62144 --- a/include/linux/rfkill.h
62145 +++ b/include/linux/rfkill.h
62146 @@ -147,6 +147,7 @@ struct rfkill_ops {
62147 void (*query)(struct rfkill *rfkill, void *data);
62148 int (*set_block)(void *data, bool blocked);
62149 };
62150 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62151
62152 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62153 /**
62154 diff --git a/include/linux/rio.h b/include/linux/rio.h
62155 index 4d50611..c6858a2 100644
62156 --- a/include/linux/rio.h
62157 +++ b/include/linux/rio.h
62158 @@ -315,7 +315,7 @@ struct rio_ops {
62159 int mbox, void *buffer, size_t len);
62160 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62161 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62162 -};
62163 +} __no_const;
62164
62165 #define RIO_RESOURCE_MEM 0x00000100
62166 #define RIO_RESOURCE_DOORBELL 0x00000200
62167 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62168 index 2148b12..519b820 100644
62169 --- a/include/linux/rmap.h
62170 +++ b/include/linux/rmap.h
62171 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62172 void anon_vma_init(void); /* create anon_vma_cachep */
62173 int anon_vma_prepare(struct vm_area_struct *);
62174 void unlink_anon_vmas(struct vm_area_struct *);
62175 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62176 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62177 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62178 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62179 void __anon_vma_link(struct vm_area_struct *);
62180
62181 static inline void anon_vma_merge(struct vm_area_struct *vma,
62182 diff --git a/include/linux/sched.h b/include/linux/sched.h
62183 index 1c4f3e9..342eb1f 100644
62184 --- a/include/linux/sched.h
62185 +++ b/include/linux/sched.h
62186 @@ -101,6 +101,7 @@ struct bio_list;
62187 struct fs_struct;
62188 struct perf_event_context;
62189 struct blk_plug;
62190 +struct linux_binprm;
62191
62192 /*
62193 * List of flags we want to share for kernel threads,
62194 @@ -380,10 +381,13 @@ struct user_namespace;
62195 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62196
62197 extern int sysctl_max_map_count;
62198 +extern unsigned long sysctl_heap_stack_gap;
62199
62200 #include <linux/aio.h>
62201
62202 #ifdef CONFIG_MMU
62203 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62204 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62205 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62206 extern unsigned long
62207 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62208 @@ -629,6 +633,17 @@ struct signal_struct {
62209 #ifdef CONFIG_TASKSTATS
62210 struct taskstats *stats;
62211 #endif
62212 +
62213 +#ifdef CONFIG_GRKERNSEC
62214 + u32 curr_ip;
62215 + u32 saved_ip;
62216 + u32 gr_saddr;
62217 + u32 gr_daddr;
62218 + u16 gr_sport;
62219 + u16 gr_dport;
62220 + u8 used_accept:1;
62221 +#endif
62222 +
62223 #ifdef CONFIG_AUDIT
62224 unsigned audit_tty;
62225 struct tty_audit_buf *tty_audit_buf;
62226 @@ -710,6 +725,11 @@ struct user_struct {
62227 struct key *session_keyring; /* UID's default session keyring */
62228 #endif
62229
62230 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62231 + unsigned int banned;
62232 + unsigned long ban_expires;
62233 +#endif
62234 +
62235 /* Hash table maintenance information */
62236 struct hlist_node uidhash_node;
62237 uid_t uid;
62238 @@ -1337,8 +1357,8 @@ struct task_struct {
62239 struct list_head thread_group;
62240
62241 struct completion *vfork_done; /* for vfork() */
62242 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62243 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62244 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62245 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62246
62247 cputime_t utime, stime, utimescaled, stimescaled;
62248 cputime_t gtime;
62249 @@ -1354,13 +1374,6 @@ struct task_struct {
62250 struct task_cputime cputime_expires;
62251 struct list_head cpu_timers[3];
62252
62253 -/* process credentials */
62254 - const struct cred __rcu *real_cred; /* objective and real subjective task
62255 - * credentials (COW) */
62256 - const struct cred __rcu *cred; /* effective (overridable) subjective task
62257 - * credentials (COW) */
62258 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62259 -
62260 char comm[TASK_COMM_LEN]; /* executable name excluding path
62261 - access with [gs]et_task_comm (which lock
62262 it with task_lock())
62263 @@ -1377,8 +1390,16 @@ struct task_struct {
62264 #endif
62265 /* CPU-specific state of this task */
62266 struct thread_struct thread;
62267 +/* thread_info moved to task_struct */
62268 +#ifdef CONFIG_X86
62269 + struct thread_info tinfo;
62270 +#endif
62271 /* filesystem information */
62272 struct fs_struct *fs;
62273 +
62274 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62275 + * credentials (COW) */
62276 +
62277 /* open file information */
62278 struct files_struct *files;
62279 /* namespaces */
62280 @@ -1425,6 +1446,11 @@ struct task_struct {
62281 struct rt_mutex_waiter *pi_blocked_on;
62282 #endif
62283
62284 +/* process credentials */
62285 + const struct cred __rcu *real_cred; /* objective and real subjective task
62286 + * credentials (COW) */
62287 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62288 +
62289 #ifdef CONFIG_DEBUG_MUTEXES
62290 /* mutex deadlock detection */
62291 struct mutex_waiter *blocked_on;
62292 @@ -1540,6 +1566,27 @@ struct task_struct {
62293 unsigned long default_timer_slack_ns;
62294
62295 struct list_head *scm_work_list;
62296 +
62297 +#ifdef CONFIG_GRKERNSEC
62298 + /* grsecurity */
62299 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62300 + u64 exec_id;
62301 +#endif
62302 +#ifdef CONFIG_GRKERNSEC_SETXID
62303 + const struct cred *delayed_cred;
62304 +#endif
62305 + struct dentry *gr_chroot_dentry;
62306 + struct acl_subject_label *acl;
62307 + struct acl_role_label *role;
62308 + struct file *exec_file;
62309 + u16 acl_role_id;
62310 + /* is this the task that authenticated to the special role */
62311 + u8 acl_sp_role;
62312 + u8 is_writable;
62313 + u8 brute;
62314 + u8 gr_is_chrooted;
62315 +#endif
62316 +
62317 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62318 /* Index of current stored address in ret_stack */
62319 int curr_ret_stack;
62320 @@ -1574,6 +1621,51 @@ struct task_struct {
62321 #endif
62322 };
62323
62324 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62325 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62326 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62327 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62328 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62329 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62330 +
62331 +#ifdef CONFIG_PAX_SOFTMODE
62332 +extern int pax_softmode;
62333 +#endif
62334 +
62335 +extern int pax_check_flags(unsigned long *);
62336 +
62337 +/* if tsk != current then task_lock must be held on it */
62338 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62339 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62340 +{
62341 + if (likely(tsk->mm))
62342 + return tsk->mm->pax_flags;
62343 + else
62344 + return 0UL;
62345 +}
62346 +
62347 +/* if tsk != current then task_lock must be held on it */
62348 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62349 +{
62350 + if (likely(tsk->mm)) {
62351 + tsk->mm->pax_flags = flags;
62352 + return 0;
62353 + }
62354 + return -EINVAL;
62355 +}
62356 +#endif
62357 +
62358 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62359 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62360 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62361 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62362 +#endif
62363 +
62364 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62365 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62366 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62367 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62368 +
62369 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62370 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62371
62372 @@ -2081,7 +2173,9 @@ void yield(void);
62373 extern struct exec_domain default_exec_domain;
62374
62375 union thread_union {
62376 +#ifndef CONFIG_X86
62377 struct thread_info thread_info;
62378 +#endif
62379 unsigned long stack[THREAD_SIZE/sizeof(long)];
62380 };
62381
62382 @@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
62383 */
62384
62385 extern struct task_struct *find_task_by_vpid(pid_t nr);
62386 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62387 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62388 struct pid_namespace *ns);
62389
62390 @@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
62391 extern void mmput(struct mm_struct *);
62392 /* Grab a reference to a task's mm, if it is not already going away */
62393 extern struct mm_struct *get_task_mm(struct task_struct *task);
62394 +/*
62395 + * Grab a reference to a task's mm, if it is not already going away
62396 + * and ptrace_may_access with the mode parameter passed to it
62397 + * succeeds.
62398 + */
62399 +extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
62400 /* Remove the current tasks stale references to the old mm_struct */
62401 extern void mm_release(struct task_struct *, struct mm_struct *);
62402 /* Allocate a new mm structure and copy contents from tsk->mm */
62403 @@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62404 extern void exit_itimers(struct signal_struct *);
62405 extern void flush_itimer_signals(void);
62406
62407 -extern NORET_TYPE void do_group_exit(int);
62408 +extern __noreturn void do_group_exit(int);
62409
62410 extern void daemonize(const char *, ...);
62411 extern int allow_signal(int);
62412 @@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62413
62414 #endif
62415
62416 -static inline int object_is_on_stack(void *obj)
62417 +static inline int object_starts_on_stack(void *obj)
62418 {
62419 - void *stack = task_stack_page(current);
62420 + const void *stack = task_stack_page(current);
62421
62422 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62423 }
62424
62425 +#ifdef CONFIG_PAX_USERCOPY
62426 +extern int object_is_on_stack(const void *obj, unsigned long len);
62427 +#endif
62428 +
62429 extern void thread_info_cache_init(void);
62430
62431 #ifdef CONFIG_DEBUG_STACK_USAGE
62432 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62433 index 899fbb4..1cb4138 100644
62434 --- a/include/linux/screen_info.h
62435 +++ b/include/linux/screen_info.h
62436 @@ -43,7 +43,8 @@ struct screen_info {
62437 __u16 pages; /* 0x32 */
62438 __u16 vesa_attributes; /* 0x34 */
62439 __u32 capabilities; /* 0x36 */
62440 - __u8 _reserved[6]; /* 0x3a */
62441 + __u16 vesapm_size; /* 0x3a */
62442 + __u8 _reserved[4]; /* 0x3c */
62443 } __attribute__((packed));
62444
62445 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62446 diff --git a/include/linux/security.h b/include/linux/security.h
62447 index e8c619d..e0cbd1c 100644
62448 --- a/include/linux/security.h
62449 +++ b/include/linux/security.h
62450 @@ -37,6 +37,7 @@
62451 #include <linux/xfrm.h>
62452 #include <linux/slab.h>
62453 #include <linux/xattr.h>
62454 +#include <linux/grsecurity.h>
62455 #include <net/flow.h>
62456
62457 /* Maximum number of letters for an LSM name string */
62458 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62459 index 0b69a46..b2ffa4c 100644
62460 --- a/include/linux/seq_file.h
62461 +++ b/include/linux/seq_file.h
62462 @@ -24,6 +24,9 @@ struct seq_file {
62463 struct mutex lock;
62464 const struct seq_operations *op;
62465 int poll_event;
62466 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62467 + u64 exec_id;
62468 +#endif
62469 void *private;
62470 };
62471
62472 @@ -33,6 +36,7 @@ struct seq_operations {
62473 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62474 int (*show) (struct seq_file *m, void *v);
62475 };
62476 +typedef struct seq_operations __no_const seq_operations_no_const;
62477
62478 #define SEQ_SKIP 1
62479
62480 diff --git a/include/linux/shm.h b/include/linux/shm.h
62481 index 92808b8..c28cac4 100644
62482 --- a/include/linux/shm.h
62483 +++ b/include/linux/shm.h
62484 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62485
62486 /* The task created the shm object. NULL if the task is dead. */
62487 struct task_struct *shm_creator;
62488 +#ifdef CONFIG_GRKERNSEC
62489 + time_t shm_createtime;
62490 + pid_t shm_lapid;
62491 +#endif
62492 };
62493
62494 /* shm_mode upper byte flags */
62495 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62496 index fe86488..1563c1c 100644
62497 --- a/include/linux/skbuff.h
62498 +++ b/include/linux/skbuff.h
62499 @@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62500 */
62501 static inline int skb_queue_empty(const struct sk_buff_head *list)
62502 {
62503 - return list->next == (struct sk_buff *)list;
62504 + return list->next == (const struct sk_buff *)list;
62505 }
62506
62507 /**
62508 @@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62509 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62510 const struct sk_buff *skb)
62511 {
62512 - return skb->next == (struct sk_buff *)list;
62513 + return skb->next == (const struct sk_buff *)list;
62514 }
62515
62516 /**
62517 @@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62518 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62519 const struct sk_buff *skb)
62520 {
62521 - return skb->prev == (struct sk_buff *)list;
62522 + return skb->prev == (const struct sk_buff *)list;
62523 }
62524
62525 /**
62526 @@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62527 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62528 */
62529 #ifndef NET_SKB_PAD
62530 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62531 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62532 #endif
62533
62534 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62535 diff --git a/include/linux/slab.h b/include/linux/slab.h
62536 index 573c809..e84c132 100644
62537 --- a/include/linux/slab.h
62538 +++ b/include/linux/slab.h
62539 @@ -11,12 +11,20 @@
62540
62541 #include <linux/gfp.h>
62542 #include <linux/types.h>
62543 +#include <linux/err.h>
62544
62545 /*
62546 * Flags to pass to kmem_cache_create().
62547 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62548 */
62549 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62550 +
62551 +#ifdef CONFIG_PAX_USERCOPY
62552 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62553 +#else
62554 +#define SLAB_USERCOPY 0x00000000UL
62555 +#endif
62556 +
62557 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62558 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62559 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62560 @@ -87,10 +95,13 @@
62561 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62562 * Both make kfree a no-op.
62563 */
62564 -#define ZERO_SIZE_PTR ((void *)16)
62565 +#define ZERO_SIZE_PTR \
62566 +({ \
62567 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62568 + (void *)(-MAX_ERRNO-1L); \
62569 +})
62570
62571 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62572 - (unsigned long)ZERO_SIZE_PTR)
62573 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62574
62575 /*
62576 * struct kmem_cache related prototypes
62577 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62578 void kfree(const void *);
62579 void kzfree(const void *);
62580 size_t ksize(const void *);
62581 +void check_object_size(const void *ptr, unsigned long n, bool to);
62582
62583 /*
62584 * Allocator specific definitions. These are mainly used to establish optimized
62585 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
62586
62587 void __init kmem_cache_init_late(void);
62588
62589 +#define kmalloc(x, y) \
62590 +({ \
62591 + void *___retval; \
62592 + intoverflow_t ___x = (intoverflow_t)x; \
62593 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
62594 + ___retval = NULL; \
62595 + else \
62596 + ___retval = kmalloc((size_t)___x, (y)); \
62597 + ___retval; \
62598 +})
62599 +
62600 +#define kmalloc_node(x, y, z) \
62601 +({ \
62602 + void *___retval; \
62603 + intoverflow_t ___x = (intoverflow_t)x; \
62604 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
62605 + ___retval = NULL; \
62606 + else \
62607 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
62608 + ___retval; \
62609 +})
62610 +
62611 +#define kzalloc(x, y) \
62612 +({ \
62613 + void *___retval; \
62614 + intoverflow_t ___x = (intoverflow_t)x; \
62615 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
62616 + ___retval = NULL; \
62617 + else \
62618 + ___retval = kzalloc((size_t)___x, (y)); \
62619 + ___retval; \
62620 +})
62621 +
62622 +#define __krealloc(x, y, z) \
62623 +({ \
62624 + void *___retval; \
62625 + intoverflow_t ___y = (intoverflow_t)y; \
62626 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
62627 + ___retval = NULL; \
62628 + else \
62629 + ___retval = __krealloc((x), (size_t)___y, (z)); \
62630 + ___retval; \
62631 +})
62632 +
62633 +#define krealloc(x, y, z) \
62634 +({ \
62635 + void *___retval; \
62636 + intoverflow_t ___y = (intoverflow_t)y; \
62637 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
62638 + ___retval = NULL; \
62639 + else \
62640 + ___retval = krealloc((x), (size_t)___y, (z)); \
62641 + ___retval; \
62642 +})
62643 +
62644 #endif /* _LINUX_SLAB_H */
62645 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
62646 index d00e0ba..1b3bf7b 100644
62647 --- a/include/linux/slab_def.h
62648 +++ b/include/linux/slab_def.h
62649 @@ -68,10 +68,10 @@ struct kmem_cache {
62650 unsigned long node_allocs;
62651 unsigned long node_frees;
62652 unsigned long node_overflow;
62653 - atomic_t allochit;
62654 - atomic_t allocmiss;
62655 - atomic_t freehit;
62656 - atomic_t freemiss;
62657 + atomic_unchecked_t allochit;
62658 + atomic_unchecked_t allocmiss;
62659 + atomic_unchecked_t freehit;
62660 + atomic_unchecked_t freemiss;
62661
62662 /*
62663 * If debugging is enabled, then the allocator can add additional
62664 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
62665 index a32bcfd..53b71f4 100644
62666 --- a/include/linux/slub_def.h
62667 +++ b/include/linux/slub_def.h
62668 @@ -89,7 +89,7 @@ struct kmem_cache {
62669 struct kmem_cache_order_objects max;
62670 struct kmem_cache_order_objects min;
62671 gfp_t allocflags; /* gfp flags to use on each alloc */
62672 - int refcount; /* Refcount for slab cache destroy */
62673 + atomic_t refcount; /* Refcount for slab cache destroy */
62674 void (*ctor)(void *);
62675 int inuse; /* Offset to metadata */
62676 int align; /* Alignment */
62677 @@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
62678 }
62679
62680 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
62681 -void *__kmalloc(size_t size, gfp_t flags);
62682 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
62683
62684 static __always_inline void *
62685 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
62686 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
62687 index de8832d..0147b46 100644
62688 --- a/include/linux/sonet.h
62689 +++ b/include/linux/sonet.h
62690 @@ -61,7 +61,7 @@ struct sonet_stats {
62691 #include <linux/atomic.h>
62692
62693 struct k_sonet_stats {
62694 -#define __HANDLE_ITEM(i) atomic_t i
62695 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
62696 __SONET_ITEMS
62697 #undef __HANDLE_ITEM
62698 };
62699 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
62700 index 3d8f9c4..69f1c0a 100644
62701 --- a/include/linux/sunrpc/clnt.h
62702 +++ b/include/linux/sunrpc/clnt.h
62703 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
62704 {
62705 switch (sap->sa_family) {
62706 case AF_INET:
62707 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
62708 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
62709 case AF_INET6:
62710 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
62711 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
62712 }
62713 return 0;
62714 }
62715 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
62716 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
62717 const struct sockaddr *src)
62718 {
62719 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
62720 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
62721 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
62722
62723 dsin->sin_family = ssin->sin_family;
62724 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
62725 if (sa->sa_family != AF_INET6)
62726 return 0;
62727
62728 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
62729 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
62730 }
62731
62732 #endif /* __KERNEL__ */
62733 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
62734 index e775689..9e206d9 100644
62735 --- a/include/linux/sunrpc/sched.h
62736 +++ b/include/linux/sunrpc/sched.h
62737 @@ -105,6 +105,7 @@ struct rpc_call_ops {
62738 void (*rpc_call_done)(struct rpc_task *, void *);
62739 void (*rpc_release)(void *);
62740 };
62741 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
62742
62743 struct rpc_task_setup {
62744 struct rpc_task *task;
62745 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
62746 index c14fe86..393245e 100644
62747 --- a/include/linux/sunrpc/svc_rdma.h
62748 +++ b/include/linux/sunrpc/svc_rdma.h
62749 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
62750 extern unsigned int svcrdma_max_requests;
62751 extern unsigned int svcrdma_max_req_size;
62752
62753 -extern atomic_t rdma_stat_recv;
62754 -extern atomic_t rdma_stat_read;
62755 -extern atomic_t rdma_stat_write;
62756 -extern atomic_t rdma_stat_sq_starve;
62757 -extern atomic_t rdma_stat_rq_starve;
62758 -extern atomic_t rdma_stat_rq_poll;
62759 -extern atomic_t rdma_stat_rq_prod;
62760 -extern atomic_t rdma_stat_sq_poll;
62761 -extern atomic_t rdma_stat_sq_prod;
62762 +extern atomic_unchecked_t rdma_stat_recv;
62763 +extern atomic_unchecked_t rdma_stat_read;
62764 +extern atomic_unchecked_t rdma_stat_write;
62765 +extern atomic_unchecked_t rdma_stat_sq_starve;
62766 +extern atomic_unchecked_t rdma_stat_rq_starve;
62767 +extern atomic_unchecked_t rdma_stat_rq_poll;
62768 +extern atomic_unchecked_t rdma_stat_rq_prod;
62769 +extern atomic_unchecked_t rdma_stat_sq_poll;
62770 +extern atomic_unchecked_t rdma_stat_sq_prod;
62771
62772 #define RPCRDMA_VERSION 1
62773
62774 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
62775 index 703cfa33..0b8ca72ac 100644
62776 --- a/include/linux/sysctl.h
62777 +++ b/include/linux/sysctl.h
62778 @@ -155,7 +155,11 @@ enum
62779 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
62780 };
62781
62782 -
62783 +#ifdef CONFIG_PAX_SOFTMODE
62784 +enum {
62785 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
62786 +};
62787 +#endif
62788
62789 /* CTL_VM names: */
62790 enum
62791 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
62792
62793 extern int proc_dostring(struct ctl_table *, int,
62794 void __user *, size_t *, loff_t *);
62795 +extern int proc_dostring_modpriv(struct ctl_table *, int,
62796 + void __user *, size_t *, loff_t *);
62797 extern int proc_dointvec(struct ctl_table *, int,
62798 void __user *, size_t *, loff_t *);
62799 extern int proc_dointvec_minmax(struct ctl_table *, int,
62800 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
62801 index a71a292..51bd91d 100644
62802 --- a/include/linux/tracehook.h
62803 +++ b/include/linux/tracehook.h
62804 @@ -54,12 +54,12 @@ struct linux_binprm;
62805 /*
62806 * ptrace report for syscall entry and exit looks identical.
62807 */
62808 -static inline void ptrace_report_syscall(struct pt_regs *regs)
62809 +static inline int ptrace_report_syscall(struct pt_regs *regs)
62810 {
62811 int ptrace = current->ptrace;
62812
62813 if (!(ptrace & PT_PTRACED))
62814 - return;
62815 + return 0;
62816
62817 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
62818
62819 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62820 send_sig(current->exit_code, current, 1);
62821 current->exit_code = 0;
62822 }
62823 +
62824 + return fatal_signal_pending(current);
62825 }
62826
62827 /**
62828 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
62829 static inline __must_check int tracehook_report_syscall_entry(
62830 struct pt_regs *regs)
62831 {
62832 - ptrace_report_syscall(regs);
62833 - return 0;
62834 + return ptrace_report_syscall(regs);
62835 }
62836
62837 /**
62838 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
62839 index ff7dc08..893e1bd 100644
62840 --- a/include/linux/tty_ldisc.h
62841 +++ b/include/linux/tty_ldisc.h
62842 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
62843
62844 struct module *owner;
62845
62846 - int refcount;
62847 + atomic_t refcount;
62848 };
62849
62850 struct tty_ldisc {
62851 diff --git a/include/linux/types.h b/include/linux/types.h
62852 index 57a9723..dbe234a 100644
62853 --- a/include/linux/types.h
62854 +++ b/include/linux/types.h
62855 @@ -213,10 +213,26 @@ typedef struct {
62856 int counter;
62857 } atomic_t;
62858
62859 +#ifdef CONFIG_PAX_REFCOUNT
62860 +typedef struct {
62861 + int counter;
62862 +} atomic_unchecked_t;
62863 +#else
62864 +typedef atomic_t atomic_unchecked_t;
62865 +#endif
62866 +
62867 #ifdef CONFIG_64BIT
62868 typedef struct {
62869 long counter;
62870 } atomic64_t;
62871 +
62872 +#ifdef CONFIG_PAX_REFCOUNT
62873 +typedef struct {
62874 + long counter;
62875 +} atomic64_unchecked_t;
62876 +#else
62877 +typedef atomic64_t atomic64_unchecked_t;
62878 +#endif
62879 #endif
62880
62881 struct list_head {
62882 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
62883 index 5ca0951..ab496a5 100644
62884 --- a/include/linux/uaccess.h
62885 +++ b/include/linux/uaccess.h
62886 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
62887 long ret; \
62888 mm_segment_t old_fs = get_fs(); \
62889 \
62890 - set_fs(KERNEL_DS); \
62891 pagefault_disable(); \
62892 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
62893 - pagefault_enable(); \
62894 + set_fs(KERNEL_DS); \
62895 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
62896 set_fs(old_fs); \
62897 + pagefault_enable(); \
62898 ret; \
62899 })
62900
62901 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
62902 index 99c1b4d..bb94261 100644
62903 --- a/include/linux/unaligned/access_ok.h
62904 +++ b/include/linux/unaligned/access_ok.h
62905 @@ -6,32 +6,32 @@
62906
62907 static inline u16 get_unaligned_le16(const void *p)
62908 {
62909 - return le16_to_cpup((__le16 *)p);
62910 + return le16_to_cpup((const __le16 *)p);
62911 }
62912
62913 static inline u32 get_unaligned_le32(const void *p)
62914 {
62915 - return le32_to_cpup((__le32 *)p);
62916 + return le32_to_cpup((const __le32 *)p);
62917 }
62918
62919 static inline u64 get_unaligned_le64(const void *p)
62920 {
62921 - return le64_to_cpup((__le64 *)p);
62922 + return le64_to_cpup((const __le64 *)p);
62923 }
62924
62925 static inline u16 get_unaligned_be16(const void *p)
62926 {
62927 - return be16_to_cpup((__be16 *)p);
62928 + return be16_to_cpup((const __be16 *)p);
62929 }
62930
62931 static inline u32 get_unaligned_be32(const void *p)
62932 {
62933 - return be32_to_cpup((__be32 *)p);
62934 + return be32_to_cpup((const __be32 *)p);
62935 }
62936
62937 static inline u64 get_unaligned_be64(const void *p)
62938 {
62939 - return be64_to_cpup((__be64 *)p);
62940 + return be64_to_cpup((const __be64 *)p);
62941 }
62942
62943 static inline void put_unaligned_le16(u16 val, void *p)
62944 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
62945 index e5a40c3..20ab0f6 100644
62946 --- a/include/linux/usb/renesas_usbhs.h
62947 +++ b/include/linux/usb/renesas_usbhs.h
62948 @@ -39,7 +39,7 @@ enum {
62949 */
62950 struct renesas_usbhs_driver_callback {
62951 int (*notify_hotplug)(struct platform_device *pdev);
62952 -};
62953 +} __no_const;
62954
62955 /*
62956 * callback functions for platform
62957 @@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
62958 * VBUS control is needed for Host
62959 */
62960 int (*set_vbus)(struct platform_device *pdev, int enable);
62961 -};
62962 +} __no_const;
62963
62964 /*
62965 * parameters for renesas usbhs
62966 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
62967 index 6f8fbcf..8259001 100644
62968 --- a/include/linux/vermagic.h
62969 +++ b/include/linux/vermagic.h
62970 @@ -25,9 +25,35 @@
62971 #define MODULE_ARCH_VERMAGIC ""
62972 #endif
62973
62974 +#ifdef CONFIG_PAX_REFCOUNT
62975 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
62976 +#else
62977 +#define MODULE_PAX_REFCOUNT ""
62978 +#endif
62979 +
62980 +#ifdef CONSTIFY_PLUGIN
62981 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
62982 +#else
62983 +#define MODULE_CONSTIFY_PLUGIN ""
62984 +#endif
62985 +
62986 +#ifdef STACKLEAK_PLUGIN
62987 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
62988 +#else
62989 +#define MODULE_STACKLEAK_PLUGIN ""
62990 +#endif
62991 +
62992 +#ifdef CONFIG_GRKERNSEC
62993 +#define MODULE_GRSEC "GRSEC "
62994 +#else
62995 +#define MODULE_GRSEC ""
62996 +#endif
62997 +
62998 #define VERMAGIC_STRING \
62999 UTS_RELEASE " " \
63000 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63001 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63002 - MODULE_ARCH_VERMAGIC
63003 + MODULE_ARCH_VERMAGIC \
63004 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63005 + MODULE_GRSEC
63006
63007 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63008 index 4bde182..aec92c1 100644
63009 --- a/include/linux/vmalloc.h
63010 +++ b/include/linux/vmalloc.h
63011 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63012 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63013 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63014 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63015 +
63016 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63017 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63018 +#endif
63019 +
63020 /* bits [20..32] reserved for arch specific ioremap internals */
63021
63022 /*
63023 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
63024 # endif
63025 #endif
63026
63027 +#define vmalloc(x) \
63028 +({ \
63029 + void *___retval; \
63030 + intoverflow_t ___x = (intoverflow_t)x; \
63031 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
63032 + ___retval = NULL; \
63033 + else \
63034 + ___retval = vmalloc((unsigned long)___x); \
63035 + ___retval; \
63036 +})
63037 +
63038 +#define vzalloc(x) \
63039 +({ \
63040 + void *___retval; \
63041 + intoverflow_t ___x = (intoverflow_t)x; \
63042 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
63043 + ___retval = NULL; \
63044 + else \
63045 + ___retval = vzalloc((unsigned long)___x); \
63046 + ___retval; \
63047 +})
63048 +
63049 +#define __vmalloc(x, y, z) \
63050 +({ \
63051 + void *___retval; \
63052 + intoverflow_t ___x = (intoverflow_t)x; \
63053 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
63054 + ___retval = NULL; \
63055 + else \
63056 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
63057 + ___retval; \
63058 +})
63059 +
63060 +#define vmalloc_user(x) \
63061 +({ \
63062 + void *___retval; \
63063 + intoverflow_t ___x = (intoverflow_t)x; \
63064 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
63065 + ___retval = NULL; \
63066 + else \
63067 + ___retval = vmalloc_user((unsigned long)___x); \
63068 + ___retval; \
63069 +})
63070 +
63071 +#define vmalloc_exec(x) \
63072 +({ \
63073 + void *___retval; \
63074 + intoverflow_t ___x = (intoverflow_t)x; \
63075 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
63076 + ___retval = NULL; \
63077 + else \
63078 + ___retval = vmalloc_exec((unsigned long)___x); \
63079 + ___retval; \
63080 +})
63081 +
63082 +#define vmalloc_node(x, y) \
63083 +({ \
63084 + void *___retval; \
63085 + intoverflow_t ___x = (intoverflow_t)x; \
63086 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
63087 + ___retval = NULL; \
63088 + else \
63089 + ___retval = vmalloc_node((unsigned long)___x, (y));\
63090 + ___retval; \
63091 +})
63092 +
63093 +#define vzalloc_node(x, y) \
63094 +({ \
63095 + void *___retval; \
63096 + intoverflow_t ___x = (intoverflow_t)x; \
63097 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
63098 + ___retval = NULL; \
63099 + else \
63100 + ___retval = vzalloc_node((unsigned long)___x, (y));\
63101 + ___retval; \
63102 +})
63103 +
63104 +#define vmalloc_32(x) \
63105 +({ \
63106 + void *___retval; \
63107 + intoverflow_t ___x = (intoverflow_t)x; \
63108 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
63109 + ___retval = NULL; \
63110 + else \
63111 + ___retval = vmalloc_32((unsigned long)___x); \
63112 + ___retval; \
63113 +})
63114 +
63115 +#define vmalloc_32_user(x) \
63116 +({ \
63117 +void *___retval; \
63118 + intoverflow_t ___x = (intoverflow_t)x; \
63119 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
63120 + ___retval = NULL; \
63121 + else \
63122 + ___retval = vmalloc_32_user((unsigned long)___x);\
63123 + ___retval; \
63124 +})
63125 +
63126 #endif /* _LINUX_VMALLOC_H */
63127 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63128 index 65efb92..137adbb 100644
63129 --- a/include/linux/vmstat.h
63130 +++ b/include/linux/vmstat.h
63131 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63132 /*
63133 * Zone based page accounting with per cpu differentials.
63134 */
63135 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63136 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63137
63138 static inline void zone_page_state_add(long x, struct zone *zone,
63139 enum zone_stat_item item)
63140 {
63141 - atomic_long_add(x, &zone->vm_stat[item]);
63142 - atomic_long_add(x, &vm_stat[item]);
63143 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63144 + atomic_long_add_unchecked(x, &vm_stat[item]);
63145 }
63146
63147 static inline unsigned long global_page_state(enum zone_stat_item item)
63148 {
63149 - long x = atomic_long_read(&vm_stat[item]);
63150 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63151 #ifdef CONFIG_SMP
63152 if (x < 0)
63153 x = 0;
63154 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63155 static inline unsigned long zone_page_state(struct zone *zone,
63156 enum zone_stat_item item)
63157 {
63158 - long x = atomic_long_read(&zone->vm_stat[item]);
63159 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63160 #ifdef CONFIG_SMP
63161 if (x < 0)
63162 x = 0;
63163 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63164 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63165 enum zone_stat_item item)
63166 {
63167 - long x = atomic_long_read(&zone->vm_stat[item]);
63168 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63169
63170 #ifdef CONFIG_SMP
63171 int cpu;
63172 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63173
63174 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63175 {
63176 - atomic_long_inc(&zone->vm_stat[item]);
63177 - atomic_long_inc(&vm_stat[item]);
63178 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63179 + atomic_long_inc_unchecked(&vm_stat[item]);
63180 }
63181
63182 static inline void __inc_zone_page_state(struct page *page,
63183 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63184
63185 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63186 {
63187 - atomic_long_dec(&zone->vm_stat[item]);
63188 - atomic_long_dec(&vm_stat[item]);
63189 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63190 + atomic_long_dec_unchecked(&vm_stat[item]);
63191 }
63192
63193 static inline void __dec_zone_page_state(struct page *page,
63194 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63195 index e5d1220..ef6e406 100644
63196 --- a/include/linux/xattr.h
63197 +++ b/include/linux/xattr.h
63198 @@ -57,6 +57,11 @@
63199 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63200 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63201
63202 +/* User namespace */
63203 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63204 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63205 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63206 +
63207 #ifdef __KERNEL__
63208
63209 #include <linux/types.h>
63210 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63211 index 4aeff96..b378cdc 100644
63212 --- a/include/media/saa7146_vv.h
63213 +++ b/include/media/saa7146_vv.h
63214 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63215 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63216
63217 /* the extension can override this */
63218 - struct v4l2_ioctl_ops ops;
63219 + v4l2_ioctl_ops_no_const ops;
63220 /* pointer to the saa7146 core ops */
63221 const struct v4l2_ioctl_ops *core_ops;
63222
63223 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63224 index c7c40f1..4f01585 100644
63225 --- a/include/media/v4l2-dev.h
63226 +++ b/include/media/v4l2-dev.h
63227 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63228
63229
63230 struct v4l2_file_operations {
63231 - struct module *owner;
63232 + struct module * const owner;
63233 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63234 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63235 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63236 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
63237 int (*open) (struct file *);
63238 int (*release) (struct file *);
63239 };
63240 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63241
63242 /*
63243 * Newer version of video_device, handled by videodev2.c
63244 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63245 index 4d1c74a..65e1221 100644
63246 --- a/include/media/v4l2-ioctl.h
63247 +++ b/include/media/v4l2-ioctl.h
63248 @@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
63249 long (*vidioc_default) (struct file *file, void *fh,
63250 bool valid_prio, int cmd, void *arg);
63251 };
63252 -
63253 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63254
63255 /* v4l debugging and diagnostics */
63256
63257 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63258 index 8d55251..dfe5b0a 100644
63259 --- a/include/net/caif/caif_hsi.h
63260 +++ b/include/net/caif/caif_hsi.h
63261 @@ -98,7 +98,7 @@ struct cfhsi_drv {
63262 void (*rx_done_cb) (struct cfhsi_drv *drv);
63263 void (*wake_up_cb) (struct cfhsi_drv *drv);
63264 void (*wake_down_cb) (struct cfhsi_drv *drv);
63265 -};
63266 +} __no_const;
63267
63268 /* Structure implemented by HSI device. */
63269 struct cfhsi_dev {
63270 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63271 index 9e5425b..8136ffc 100644
63272 --- a/include/net/caif/cfctrl.h
63273 +++ b/include/net/caif/cfctrl.h
63274 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63275 void (*radioset_rsp)(void);
63276 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63277 struct cflayer *client_layer);
63278 -};
63279 +} __no_const;
63280
63281 /* Link Setup Parameters for CAIF-Links. */
63282 struct cfctrl_link_param {
63283 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63284 struct cfctrl {
63285 struct cfsrvl serv;
63286 struct cfctrl_rsp res;
63287 - atomic_t req_seq_no;
63288 - atomic_t rsp_seq_no;
63289 + atomic_unchecked_t req_seq_no;
63290 + atomic_unchecked_t rsp_seq_no;
63291 struct list_head list;
63292 /* Protects from simultaneous access to first_req list */
63293 spinlock_t info_list_lock;
63294 diff --git a/include/net/flow.h b/include/net/flow.h
63295 index 2a7eefd..3250f3b 100644
63296 --- a/include/net/flow.h
63297 +++ b/include/net/flow.h
63298 @@ -218,6 +218,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63299
63300 extern void flow_cache_flush(void);
63301 extern void flow_cache_flush_deferred(void);
63302 -extern atomic_t flow_cache_genid;
63303 +extern atomic_unchecked_t flow_cache_genid;
63304
63305 #endif
63306 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63307 index e9ff3fc..9d3e5c7 100644
63308 --- a/include/net/inetpeer.h
63309 +++ b/include/net/inetpeer.h
63310 @@ -48,8 +48,8 @@ struct inet_peer {
63311 */
63312 union {
63313 struct {
63314 - atomic_t rid; /* Frag reception counter */
63315 - atomic_t ip_id_count; /* IP ID for the next packet */
63316 + atomic_unchecked_t rid; /* Frag reception counter */
63317 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63318 __u32 tcp_ts;
63319 __u32 tcp_ts_stamp;
63320 };
63321 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63322 more++;
63323 inet_peer_refcheck(p);
63324 do {
63325 - old = atomic_read(&p->ip_id_count);
63326 + old = atomic_read_unchecked(&p->ip_id_count);
63327 new = old + more;
63328 if (!new)
63329 new = 1;
63330 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63331 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63332 return new;
63333 }
63334
63335 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63336 index 10422ef..662570f 100644
63337 --- a/include/net/ip_fib.h
63338 +++ b/include/net/ip_fib.h
63339 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63340
63341 #define FIB_RES_SADDR(net, res) \
63342 ((FIB_RES_NH(res).nh_saddr_genid == \
63343 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63344 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63345 FIB_RES_NH(res).nh_saddr : \
63346 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63347 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63348 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63349 index e5a7b9a..f4fc44b 100644
63350 --- a/include/net/ip_vs.h
63351 +++ b/include/net/ip_vs.h
63352 @@ -509,7 +509,7 @@ struct ip_vs_conn {
63353 struct ip_vs_conn *control; /* Master control connection */
63354 atomic_t n_control; /* Number of controlled ones */
63355 struct ip_vs_dest *dest; /* real server */
63356 - atomic_t in_pkts; /* incoming packet counter */
63357 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63358
63359 /* packet transmitter for different forwarding methods. If it
63360 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63361 @@ -647,7 +647,7 @@ struct ip_vs_dest {
63362 __be16 port; /* port number of the server */
63363 union nf_inet_addr addr; /* IP address of the server */
63364 volatile unsigned flags; /* dest status flags */
63365 - atomic_t conn_flags; /* flags to copy to conn */
63366 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63367 atomic_t weight; /* server weight */
63368
63369 atomic_t refcnt; /* reference counter */
63370 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63371 index 69b610a..fe3962c 100644
63372 --- a/include/net/irda/ircomm_core.h
63373 +++ b/include/net/irda/ircomm_core.h
63374 @@ -51,7 +51,7 @@ typedef struct {
63375 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63376 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63377 struct ircomm_info *);
63378 -} call_t;
63379 +} __no_const call_t;
63380
63381 struct ircomm_cb {
63382 irda_queue_t queue;
63383 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63384 index 59ba38bc..d515662 100644
63385 --- a/include/net/irda/ircomm_tty.h
63386 +++ b/include/net/irda/ircomm_tty.h
63387 @@ -35,6 +35,7 @@
63388 #include <linux/termios.h>
63389 #include <linux/timer.h>
63390 #include <linux/tty.h> /* struct tty_struct */
63391 +#include <asm/local.h>
63392
63393 #include <net/irda/irias_object.h>
63394 #include <net/irda/ircomm_core.h>
63395 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63396 unsigned short close_delay;
63397 unsigned short closing_wait; /* time to wait before closing */
63398
63399 - int open_count;
63400 - int blocked_open; /* # of blocked opens */
63401 + local_t open_count;
63402 + local_t blocked_open; /* # of blocked opens */
63403
63404 /* Protect concurent access to :
63405 * o self->open_count
63406 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63407 index f2419cf..473679f 100644
63408 --- a/include/net/iucv/af_iucv.h
63409 +++ b/include/net/iucv/af_iucv.h
63410 @@ -139,7 +139,7 @@ struct iucv_sock {
63411 struct iucv_sock_list {
63412 struct hlist_head head;
63413 rwlock_t lock;
63414 - atomic_t autobind_name;
63415 + atomic_unchecked_t autobind_name;
63416 };
63417
63418 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63419 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63420 index 2720884..3aa5c25 100644
63421 --- a/include/net/neighbour.h
63422 +++ b/include/net/neighbour.h
63423 @@ -122,7 +122,7 @@ struct neigh_ops {
63424 void (*error_report)(struct neighbour *, struct sk_buff *);
63425 int (*output)(struct neighbour *, struct sk_buff *);
63426 int (*connected_output)(struct neighbour *, struct sk_buff *);
63427 -};
63428 +} __do_const;
63429
63430 struct pneigh_entry {
63431 struct pneigh_entry *next;
63432 diff --git a/include/net/netlink.h b/include/net/netlink.h
63433 index cb1f350..3279d2c 100644
63434 --- a/include/net/netlink.h
63435 +++ b/include/net/netlink.h
63436 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63437 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63438 {
63439 if (mark)
63440 - skb_trim(skb, (unsigned char *) mark - skb->data);
63441 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63442 }
63443
63444 /**
63445 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63446 index d786b4f..4c3dd41 100644
63447 --- a/include/net/netns/ipv4.h
63448 +++ b/include/net/netns/ipv4.h
63449 @@ -56,8 +56,8 @@ struct netns_ipv4 {
63450
63451 unsigned int sysctl_ping_group_range[2];
63452
63453 - atomic_t rt_genid;
63454 - atomic_t dev_addr_genid;
63455 + atomic_unchecked_t rt_genid;
63456 + atomic_unchecked_t dev_addr_genid;
63457
63458 #ifdef CONFIG_IP_MROUTE
63459 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63460 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63461 index 6a72a58..e6a127d 100644
63462 --- a/include/net/sctp/sctp.h
63463 +++ b/include/net/sctp/sctp.h
63464 @@ -318,9 +318,9 @@ do { \
63465
63466 #else /* SCTP_DEBUG */
63467
63468 -#define SCTP_DEBUG_PRINTK(whatever...)
63469 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63470 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63471 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63472 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63473 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63474 #define SCTP_ENABLE_DEBUG
63475 #define SCTP_DISABLE_DEBUG
63476 #define SCTP_ASSERT(expr, str, func)
63477 diff --git a/include/net/sock.h b/include/net/sock.h
63478 index 32e3937..87a1dbc 100644
63479 --- a/include/net/sock.h
63480 +++ b/include/net/sock.h
63481 @@ -277,7 +277,7 @@ struct sock {
63482 #ifdef CONFIG_RPS
63483 __u32 sk_rxhash;
63484 #endif
63485 - atomic_t sk_drops;
63486 + atomic_unchecked_t sk_drops;
63487 int sk_rcvbuf;
63488
63489 struct sk_filter __rcu *sk_filter;
63490 @@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
63491 }
63492
63493 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63494 - char __user *from, char *to,
63495 + char __user *from, unsigned char *to,
63496 int copy, int offset)
63497 {
63498 if (skb->ip_summed == CHECKSUM_NONE) {
63499 diff --git a/include/net/tcp.h b/include/net/tcp.h
63500 index bb18c4d..bb87972 100644
63501 --- a/include/net/tcp.h
63502 +++ b/include/net/tcp.h
63503 @@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
63504 char *name;
63505 sa_family_t family;
63506 const struct file_operations *seq_fops;
63507 - struct seq_operations seq_ops;
63508 + seq_operations_no_const seq_ops;
63509 };
63510
63511 struct tcp_iter_state {
63512 diff --git a/include/net/udp.h b/include/net/udp.h
63513 index 3b285f4..0219639 100644
63514 --- a/include/net/udp.h
63515 +++ b/include/net/udp.h
63516 @@ -237,7 +237,7 @@ struct udp_seq_afinfo {
63517 sa_family_t family;
63518 struct udp_table *udp_table;
63519 const struct file_operations *seq_fops;
63520 - struct seq_operations seq_ops;
63521 + seq_operations_no_const seq_ops;
63522 };
63523
63524 struct udp_iter_state {
63525 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63526 index b203e14..1df3991 100644
63527 --- a/include/net/xfrm.h
63528 +++ b/include/net/xfrm.h
63529 @@ -505,7 +505,7 @@ struct xfrm_policy {
63530 struct timer_list timer;
63531
63532 struct flow_cache_object flo;
63533 - atomic_t genid;
63534 + atomic_unchecked_t genid;
63535 u32 priority;
63536 u32 index;
63537 struct xfrm_mark mark;
63538 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63539 index 1a046b1..ee0bef0 100644
63540 --- a/include/rdma/iw_cm.h
63541 +++ b/include/rdma/iw_cm.h
63542 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
63543 int backlog);
63544
63545 int (*destroy_listen)(struct iw_cm_id *cm_id);
63546 -};
63547 +} __no_const;
63548
63549 /**
63550 * iw_create_cm_id - Create an IW CM identifier.
63551 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63552 index 5d1a758..1dbf795 100644
63553 --- a/include/scsi/libfc.h
63554 +++ b/include/scsi/libfc.h
63555 @@ -748,6 +748,7 @@ struct libfc_function_template {
63556 */
63557 void (*disc_stop_final) (struct fc_lport *);
63558 };
63559 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63560
63561 /**
63562 * struct fc_disc - Discovery context
63563 @@ -851,7 +852,7 @@ struct fc_lport {
63564 struct fc_vport *vport;
63565
63566 /* Operational Information */
63567 - struct libfc_function_template tt;
63568 + libfc_function_template_no_const tt;
63569 u8 link_up;
63570 u8 qfull;
63571 enum fc_lport_state state;
63572 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63573 index 5591ed5..13eb457 100644
63574 --- a/include/scsi/scsi_device.h
63575 +++ b/include/scsi/scsi_device.h
63576 @@ -161,9 +161,9 @@ struct scsi_device {
63577 unsigned int max_device_blocked; /* what device_blocked counts down from */
63578 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63579
63580 - atomic_t iorequest_cnt;
63581 - atomic_t iodone_cnt;
63582 - atomic_t ioerr_cnt;
63583 + atomic_unchecked_t iorequest_cnt;
63584 + atomic_unchecked_t iodone_cnt;
63585 + atomic_unchecked_t ioerr_cnt;
63586
63587 struct device sdev_gendev,
63588 sdev_dev;
63589 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63590 index 2a65167..91e01f8 100644
63591 --- a/include/scsi/scsi_transport_fc.h
63592 +++ b/include/scsi/scsi_transport_fc.h
63593 @@ -711,7 +711,7 @@ struct fc_function_template {
63594 unsigned long show_host_system_hostname:1;
63595
63596 unsigned long disable_target_scan:1;
63597 -};
63598 +} __do_const;
63599
63600
63601 /**
63602 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63603 index 030b87c..98a6954 100644
63604 --- a/include/sound/ak4xxx-adda.h
63605 +++ b/include/sound/ak4xxx-adda.h
63606 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63607 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63608 unsigned char val);
63609 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63610 -};
63611 +} __no_const;
63612
63613 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63614
63615 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63616 index 8c05e47..2b5df97 100644
63617 --- a/include/sound/hwdep.h
63618 +++ b/include/sound/hwdep.h
63619 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63620 struct snd_hwdep_dsp_status *status);
63621 int (*dsp_load)(struct snd_hwdep *hw,
63622 struct snd_hwdep_dsp_image *image);
63623 -};
63624 +} __no_const;
63625
63626 struct snd_hwdep {
63627 struct snd_card *card;
63628 diff --git a/include/sound/info.h b/include/sound/info.h
63629 index 5492cc4..1a65278 100644
63630 --- a/include/sound/info.h
63631 +++ b/include/sound/info.h
63632 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
63633 struct snd_info_buffer *buffer);
63634 void (*write)(struct snd_info_entry *entry,
63635 struct snd_info_buffer *buffer);
63636 -};
63637 +} __no_const;
63638
63639 struct snd_info_entry_ops {
63640 int (*open)(struct snd_info_entry *entry,
63641 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63642 index 0cf91b2..b70cae4 100644
63643 --- a/include/sound/pcm.h
63644 +++ b/include/sound/pcm.h
63645 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
63646 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63647 int (*ack)(struct snd_pcm_substream *substream);
63648 };
63649 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63650
63651 /*
63652 *
63653 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63654 index af1b49e..a5d55a5 100644
63655 --- a/include/sound/sb16_csp.h
63656 +++ b/include/sound/sb16_csp.h
63657 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63658 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63659 int (*csp_stop) (struct snd_sb_csp * p);
63660 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63661 -};
63662 +} __no_const;
63663
63664 /*
63665 * CSP private data
63666 diff --git a/include/sound/soc.h b/include/sound/soc.h
63667 index 11cfb59..e3f93f4 100644
63668 --- a/include/sound/soc.h
63669 +++ b/include/sound/soc.h
63670 @@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
63671 /* platform IO - used for platform DAPM */
63672 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
63673 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
63674 -};
63675 +} __do_const;
63676
63677 struct snd_soc_platform {
63678 const char *name;
63679 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
63680 index 444cd6b..3327cc5 100644
63681 --- a/include/sound/ymfpci.h
63682 +++ b/include/sound/ymfpci.h
63683 @@ -358,7 +358,7 @@ struct snd_ymfpci {
63684 spinlock_t reg_lock;
63685 spinlock_t voice_lock;
63686 wait_queue_head_t interrupt_sleep;
63687 - atomic_t interrupt_sleep_count;
63688 + atomic_unchecked_t interrupt_sleep_count;
63689 struct snd_info_entry *proc_entry;
63690 const struct firmware *dsp_microcode;
63691 const struct firmware *controller_microcode;
63692 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
63693 index 94bbec3..3a8c6b0 100644
63694 --- a/include/target/target_core_base.h
63695 +++ b/include/target/target_core_base.h
63696 @@ -346,7 +346,7 @@ struct t10_reservation_ops {
63697 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
63698 int (*t10_pr_register)(struct se_cmd *);
63699 int (*t10_pr_clear)(struct se_cmd *);
63700 -};
63701 +} __no_const;
63702
63703 struct t10_reservation {
63704 /* Reservation effects all target ports */
63705 @@ -465,8 +465,8 @@ struct se_cmd {
63706 atomic_t t_se_count;
63707 atomic_t t_task_cdbs_left;
63708 atomic_t t_task_cdbs_ex_left;
63709 - atomic_t t_task_cdbs_sent;
63710 - atomic_t t_transport_aborted;
63711 + atomic_unchecked_t t_task_cdbs_sent;
63712 + atomic_unchecked_t t_transport_aborted;
63713 atomic_t t_transport_active;
63714 atomic_t t_transport_complete;
63715 atomic_t t_transport_queue_active;
63716 @@ -705,7 +705,7 @@ struct se_device {
63717 /* Active commands on this virtual SE device */
63718 atomic_t simple_cmds;
63719 atomic_t depth_left;
63720 - atomic_t dev_ordered_id;
63721 + atomic_unchecked_t dev_ordered_id;
63722 atomic_t execute_tasks;
63723 atomic_t dev_ordered_sync;
63724 atomic_t dev_qf_count;
63725 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
63726 index 1c09820..7f5ec79 100644
63727 --- a/include/trace/events/irq.h
63728 +++ b/include/trace/events/irq.h
63729 @@ -36,7 +36,7 @@ struct softirq_action;
63730 */
63731 TRACE_EVENT(irq_handler_entry,
63732
63733 - TP_PROTO(int irq, struct irqaction *action),
63734 + TP_PROTO(int irq, const struct irqaction *action),
63735
63736 TP_ARGS(irq, action),
63737
63738 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
63739 */
63740 TRACE_EVENT(irq_handler_exit,
63741
63742 - TP_PROTO(int irq, struct irqaction *action, int ret),
63743 + TP_PROTO(int irq, const struct irqaction *action, int ret),
63744
63745 TP_ARGS(irq, action, ret),
63746
63747 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
63748 index c41f308..6918de3 100644
63749 --- a/include/video/udlfb.h
63750 +++ b/include/video/udlfb.h
63751 @@ -52,10 +52,10 @@ struct dlfb_data {
63752 u32 pseudo_palette[256];
63753 int blank_mode; /*one of FB_BLANK_ */
63754 /* blit-only rendering path metrics, exposed through sysfs */
63755 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63756 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
63757 - atomic_t bytes_sent; /* to usb, after compression including overhead */
63758 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
63759 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
63760 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
63761 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
63762 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
63763 };
63764
63765 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
63766 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
63767 index 0993a22..32ba2fe 100644
63768 --- a/include/video/uvesafb.h
63769 +++ b/include/video/uvesafb.h
63770 @@ -177,6 +177,7 @@ struct uvesafb_par {
63771 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
63772 u8 pmi_setpal; /* PMI for palette changes */
63773 u16 *pmi_base; /* protected mode interface location */
63774 + u8 *pmi_code; /* protected mode code location */
63775 void *pmi_start;
63776 void *pmi_pal;
63777 u8 *vbe_state_orig; /*
63778 diff --git a/init/Kconfig b/init/Kconfig
63779 index 43298f9..2f56c12 100644
63780 --- a/init/Kconfig
63781 +++ b/init/Kconfig
63782 @@ -1214,7 +1214,7 @@ config SLUB_DEBUG
63783
63784 config COMPAT_BRK
63785 bool "Disable heap randomization"
63786 - default y
63787 + default n
63788 help
63789 Randomizing heap placement makes heap exploits harder, but it
63790 also breaks ancient binaries (including anything libc5 based).
63791 diff --git a/init/do_mounts.c b/init/do_mounts.c
63792 index db6e5ee..7677ff7 100644
63793 --- a/init/do_mounts.c
63794 +++ b/init/do_mounts.c
63795 @@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
63796
63797 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
63798 {
63799 - int err = sys_mount(name, "/root", fs, flags, data);
63800 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
63801 if (err)
63802 return err;
63803
63804 - sys_chdir((const char __user __force *)"/root");
63805 + sys_chdir((const char __force_user*)"/root");
63806 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
63807 printk(KERN_INFO
63808 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
63809 @@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
63810 va_start(args, fmt);
63811 vsprintf(buf, fmt, args);
63812 va_end(args);
63813 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
63814 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
63815 if (fd >= 0) {
63816 sys_ioctl(fd, FDEJECT, 0);
63817 sys_close(fd);
63818 }
63819 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
63820 - fd = sys_open("/dev/console", O_RDWR, 0);
63821 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
63822 if (fd >= 0) {
63823 sys_ioctl(fd, TCGETS, (long)&termios);
63824 termios.c_lflag &= ~ICANON;
63825 sys_ioctl(fd, TCSETSF, (long)&termios);
63826 - sys_read(fd, &c, 1);
63827 + sys_read(fd, (char __user *)&c, 1);
63828 termios.c_lflag |= ICANON;
63829 sys_ioctl(fd, TCSETSF, (long)&termios);
63830 sys_close(fd);
63831 @@ -553,6 +553,6 @@ void __init prepare_namespace(void)
63832 mount_root();
63833 out:
63834 devtmpfs_mount("dev");
63835 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63836 - sys_chroot((const char __user __force *)".");
63837 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63838 + sys_chroot((const char __force_user *)".");
63839 }
63840 diff --git a/init/do_mounts.h b/init/do_mounts.h
63841 index f5b978a..69dbfe8 100644
63842 --- a/init/do_mounts.h
63843 +++ b/init/do_mounts.h
63844 @@ -15,15 +15,15 @@ extern int root_mountflags;
63845
63846 static inline int create_dev(char *name, dev_t dev)
63847 {
63848 - sys_unlink(name);
63849 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
63850 + sys_unlink((char __force_user *)name);
63851 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
63852 }
63853
63854 #if BITS_PER_LONG == 32
63855 static inline u32 bstat(char *name)
63856 {
63857 struct stat64 stat;
63858 - if (sys_stat64(name, &stat) != 0)
63859 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
63860 return 0;
63861 if (!S_ISBLK(stat.st_mode))
63862 return 0;
63863 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
63864 static inline u32 bstat(char *name)
63865 {
63866 struct stat stat;
63867 - if (sys_newstat(name, &stat) != 0)
63868 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
63869 return 0;
63870 if (!S_ISBLK(stat.st_mode))
63871 return 0;
63872 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
63873 index 3098a38..253064e 100644
63874 --- a/init/do_mounts_initrd.c
63875 +++ b/init/do_mounts_initrd.c
63876 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
63877 create_dev("/dev/root.old", Root_RAM0);
63878 /* mount initrd on rootfs' /root */
63879 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
63880 - sys_mkdir("/old", 0700);
63881 - root_fd = sys_open("/", 0, 0);
63882 - old_fd = sys_open("/old", 0, 0);
63883 + sys_mkdir((const char __force_user *)"/old", 0700);
63884 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
63885 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
63886 /* move initrd over / and chdir/chroot in initrd root */
63887 - sys_chdir("/root");
63888 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
63889 - sys_chroot(".");
63890 + sys_chdir((const char __force_user *)"/root");
63891 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
63892 + sys_chroot((const char __force_user *)".");
63893
63894 /*
63895 * In case that a resume from disk is carried out by linuxrc or one of
63896 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
63897
63898 /* move initrd to rootfs' /old */
63899 sys_fchdir(old_fd);
63900 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
63901 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
63902 /* switch root and cwd back to / of rootfs */
63903 sys_fchdir(root_fd);
63904 - sys_chroot(".");
63905 + sys_chroot((const char __force_user *)".");
63906 sys_close(old_fd);
63907 sys_close(root_fd);
63908
63909 if (new_decode_dev(real_root_dev) == Root_RAM0) {
63910 - sys_chdir("/old");
63911 + sys_chdir((const char __force_user *)"/old");
63912 return;
63913 }
63914
63915 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
63916 mount_root();
63917
63918 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
63919 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
63920 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
63921 if (!error)
63922 printk("okay\n");
63923 else {
63924 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
63925 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
63926 if (error == -ENOENT)
63927 printk("/initrd does not exist. Ignored.\n");
63928 else
63929 printk("failed\n");
63930 printk(KERN_NOTICE "Unmounting old root\n");
63931 - sys_umount("/old", MNT_DETACH);
63932 + sys_umount((char __force_user *)"/old", MNT_DETACH);
63933 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
63934 if (fd < 0) {
63935 error = fd;
63936 @@ -116,11 +116,11 @@ int __init initrd_load(void)
63937 * mounted in the normal path.
63938 */
63939 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
63940 - sys_unlink("/initrd.image");
63941 + sys_unlink((const char __force_user *)"/initrd.image");
63942 handle_initrd();
63943 return 1;
63944 }
63945 }
63946 - sys_unlink("/initrd.image");
63947 + sys_unlink((const char __force_user *)"/initrd.image");
63948 return 0;
63949 }
63950 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
63951 index 32c4799..c27ee74 100644
63952 --- a/init/do_mounts_md.c
63953 +++ b/init/do_mounts_md.c
63954 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
63955 partitioned ? "_d" : "", minor,
63956 md_setup_args[ent].device_names);
63957
63958 - fd = sys_open(name, 0, 0);
63959 + fd = sys_open((char __force_user *)name, 0, 0);
63960 if (fd < 0) {
63961 printk(KERN_ERR "md: open failed - cannot start "
63962 "array %s\n", name);
63963 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
63964 * array without it
63965 */
63966 sys_close(fd);
63967 - fd = sys_open(name, 0, 0);
63968 + fd = sys_open((char __force_user *)name, 0, 0);
63969 sys_ioctl(fd, BLKRRPART, 0);
63970 }
63971 sys_close(fd);
63972 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
63973
63974 wait_for_device_probe();
63975
63976 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
63977 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
63978 if (fd >= 0) {
63979 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
63980 sys_close(fd);
63981 diff --git a/init/initramfs.c b/init/initramfs.c
63982 index 2531811..040d4d4 100644
63983 --- a/init/initramfs.c
63984 +++ b/init/initramfs.c
63985 @@ -74,7 +74,7 @@ static void __init free_hash(void)
63986 }
63987 }
63988
63989 -static long __init do_utime(char __user *filename, time_t mtime)
63990 +static long __init do_utime(__force char __user *filename, time_t mtime)
63991 {
63992 struct timespec t[2];
63993
63994 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
63995 struct dir_entry *de, *tmp;
63996 list_for_each_entry_safe(de, tmp, &dir_list, list) {
63997 list_del(&de->list);
63998 - do_utime(de->name, de->mtime);
63999 + do_utime((char __force_user *)de->name, de->mtime);
64000 kfree(de->name);
64001 kfree(de);
64002 }
64003 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64004 if (nlink >= 2) {
64005 char *old = find_link(major, minor, ino, mode, collected);
64006 if (old)
64007 - return (sys_link(old, collected) < 0) ? -1 : 1;
64008 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64009 }
64010 return 0;
64011 }
64012 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
64013 {
64014 struct stat st;
64015
64016 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64017 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64018 if (S_ISDIR(st.st_mode))
64019 - sys_rmdir(path);
64020 + sys_rmdir((char __force_user *)path);
64021 else
64022 - sys_unlink(path);
64023 + sys_unlink((char __force_user *)path);
64024 }
64025 }
64026
64027 @@ -305,7 +305,7 @@ static int __init do_name(void)
64028 int openflags = O_WRONLY|O_CREAT;
64029 if (ml != 1)
64030 openflags |= O_TRUNC;
64031 - wfd = sys_open(collected, openflags, mode);
64032 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64033
64034 if (wfd >= 0) {
64035 sys_fchown(wfd, uid, gid);
64036 @@ -317,17 +317,17 @@ static int __init do_name(void)
64037 }
64038 }
64039 } else if (S_ISDIR(mode)) {
64040 - sys_mkdir(collected, mode);
64041 - sys_chown(collected, uid, gid);
64042 - sys_chmod(collected, mode);
64043 + sys_mkdir((char __force_user *)collected, mode);
64044 + sys_chown((char __force_user *)collected, uid, gid);
64045 + sys_chmod((char __force_user *)collected, mode);
64046 dir_add(collected, mtime);
64047 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64048 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64049 if (maybe_link() == 0) {
64050 - sys_mknod(collected, mode, rdev);
64051 - sys_chown(collected, uid, gid);
64052 - sys_chmod(collected, mode);
64053 - do_utime(collected, mtime);
64054 + sys_mknod((char __force_user *)collected, mode, rdev);
64055 + sys_chown((char __force_user *)collected, uid, gid);
64056 + sys_chmod((char __force_user *)collected, mode);
64057 + do_utime((char __force_user *)collected, mtime);
64058 }
64059 }
64060 return 0;
64061 @@ -336,15 +336,15 @@ static int __init do_name(void)
64062 static int __init do_copy(void)
64063 {
64064 if (count >= body_len) {
64065 - sys_write(wfd, victim, body_len);
64066 + sys_write(wfd, (char __force_user *)victim, body_len);
64067 sys_close(wfd);
64068 - do_utime(vcollected, mtime);
64069 + do_utime((char __force_user *)vcollected, mtime);
64070 kfree(vcollected);
64071 eat(body_len);
64072 state = SkipIt;
64073 return 0;
64074 } else {
64075 - sys_write(wfd, victim, count);
64076 + sys_write(wfd, (char __force_user *)victim, count);
64077 body_len -= count;
64078 eat(count);
64079 return 1;
64080 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64081 {
64082 collected[N_ALIGN(name_len) + body_len] = '\0';
64083 clean_path(collected, 0);
64084 - sys_symlink(collected + N_ALIGN(name_len), collected);
64085 - sys_lchown(collected, uid, gid);
64086 - do_utime(collected, mtime);
64087 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64088 + sys_lchown((char __force_user *)collected, uid, gid);
64089 + do_utime((char __force_user *)collected, mtime);
64090 state = SkipIt;
64091 next_state = Reset;
64092 return 0;
64093 diff --git a/init/main.c b/init/main.c
64094 index 217ed23..ec5406f 100644
64095 --- a/init/main.c
64096 +++ b/init/main.c
64097 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
64098 extern void tc_init(void);
64099 #endif
64100
64101 +extern void grsecurity_init(void);
64102 +
64103 /*
64104 * Debug helper: via this flag we know that we are in 'early bootup code'
64105 * where only the boot processor is running with IRQ disabled. This means
64106 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
64107
64108 __setup("reset_devices", set_reset_devices);
64109
64110 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64111 +extern char pax_enter_kernel_user[];
64112 +extern char pax_exit_kernel_user[];
64113 +extern pgdval_t clone_pgd_mask;
64114 +#endif
64115 +
64116 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64117 +static int __init setup_pax_nouderef(char *str)
64118 +{
64119 +#ifdef CONFIG_X86_32
64120 + unsigned int cpu;
64121 + struct desc_struct *gdt;
64122 +
64123 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64124 + gdt = get_cpu_gdt_table(cpu);
64125 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64126 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64127 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64128 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64129 + }
64130 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64131 +#else
64132 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64133 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64134 + clone_pgd_mask = ~(pgdval_t)0UL;
64135 +#endif
64136 +
64137 + return 0;
64138 +}
64139 +early_param("pax_nouderef", setup_pax_nouderef);
64140 +#endif
64141 +
64142 +#ifdef CONFIG_PAX_SOFTMODE
64143 +int pax_softmode;
64144 +
64145 +static int __init setup_pax_softmode(char *str)
64146 +{
64147 + get_option(&str, &pax_softmode);
64148 + return 1;
64149 +}
64150 +__setup("pax_softmode=", setup_pax_softmode);
64151 +#endif
64152 +
64153 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64154 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64155 static const char *panic_later, *panic_param;
64156 @@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64157 {
64158 int count = preempt_count();
64159 int ret;
64160 + const char *msg1 = "", *msg2 = "";
64161
64162 if (initcall_debug)
64163 ret = do_one_initcall_debug(fn);
64164 @@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64165 sprintf(msgbuf, "error code %d ", ret);
64166
64167 if (preempt_count() != count) {
64168 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64169 + msg1 = " preemption imbalance";
64170 preempt_count() = count;
64171 }
64172 if (irqs_disabled()) {
64173 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64174 + msg2 = " disabled interrupts";
64175 local_irq_enable();
64176 }
64177 - if (msgbuf[0]) {
64178 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64179 + if (msgbuf[0] || *msg1 || *msg2) {
64180 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64181 }
64182
64183 return ret;
64184 @@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
64185 do_basic_setup();
64186
64187 /* Open the /dev/console on the rootfs, this should never fail */
64188 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64189 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64190 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64191
64192 (void) sys_dup(0);
64193 @@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
64194 if (!ramdisk_execute_command)
64195 ramdisk_execute_command = "/init";
64196
64197 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64198 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64199 ramdisk_execute_command = NULL;
64200 prepare_namespace();
64201 }
64202
64203 + grsecurity_init();
64204 +
64205 /*
64206 * Ok, we have completed the initial bootup, and
64207 * we're essentially up and running. Get rid of the
64208 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64209 index 5b4293d..f179875 100644
64210 --- a/ipc/mqueue.c
64211 +++ b/ipc/mqueue.c
64212 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64213 mq_bytes = (mq_msg_tblsz +
64214 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64215
64216 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64217 spin_lock(&mq_lock);
64218 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64219 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
64220 diff --git a/ipc/msg.c b/ipc/msg.c
64221 index 7385de2..a8180e08 100644
64222 --- a/ipc/msg.c
64223 +++ b/ipc/msg.c
64224 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64225 return security_msg_queue_associate(msq, msgflg);
64226 }
64227
64228 +static struct ipc_ops msg_ops = {
64229 + .getnew = newque,
64230 + .associate = msg_security,
64231 + .more_checks = NULL
64232 +};
64233 +
64234 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64235 {
64236 struct ipc_namespace *ns;
64237 - struct ipc_ops msg_ops;
64238 struct ipc_params msg_params;
64239
64240 ns = current->nsproxy->ipc_ns;
64241
64242 - msg_ops.getnew = newque;
64243 - msg_ops.associate = msg_security;
64244 - msg_ops.more_checks = NULL;
64245 -
64246 msg_params.key = key;
64247 msg_params.flg = msgflg;
64248
64249 diff --git a/ipc/sem.c b/ipc/sem.c
64250 index 5215a81..cfc0cac 100644
64251 --- a/ipc/sem.c
64252 +++ b/ipc/sem.c
64253 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64254 return 0;
64255 }
64256
64257 +static struct ipc_ops sem_ops = {
64258 + .getnew = newary,
64259 + .associate = sem_security,
64260 + .more_checks = sem_more_checks
64261 +};
64262 +
64263 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64264 {
64265 struct ipc_namespace *ns;
64266 - struct ipc_ops sem_ops;
64267 struct ipc_params sem_params;
64268
64269 ns = current->nsproxy->ipc_ns;
64270 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64271 if (nsems < 0 || nsems > ns->sc_semmsl)
64272 return -EINVAL;
64273
64274 - sem_ops.getnew = newary;
64275 - sem_ops.associate = sem_security;
64276 - sem_ops.more_checks = sem_more_checks;
64277 -
64278 sem_params.key = key;
64279 sem_params.flg = semflg;
64280 sem_params.u.nsems = nsems;
64281 diff --git a/ipc/shm.c b/ipc/shm.c
64282 index b76be5b..859e750 100644
64283 --- a/ipc/shm.c
64284 +++ b/ipc/shm.c
64285 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64286 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64287 #endif
64288
64289 +#ifdef CONFIG_GRKERNSEC
64290 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64291 + const time_t shm_createtime, const uid_t cuid,
64292 + const int shmid);
64293 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64294 + const time_t shm_createtime);
64295 +#endif
64296 +
64297 void shm_init_ns(struct ipc_namespace *ns)
64298 {
64299 ns->shm_ctlmax = SHMMAX;
64300 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64301 shp->shm_lprid = 0;
64302 shp->shm_atim = shp->shm_dtim = 0;
64303 shp->shm_ctim = get_seconds();
64304 +#ifdef CONFIG_GRKERNSEC
64305 + {
64306 + struct timespec timeval;
64307 + do_posix_clock_monotonic_gettime(&timeval);
64308 +
64309 + shp->shm_createtime = timeval.tv_sec;
64310 + }
64311 +#endif
64312 shp->shm_segsz = size;
64313 shp->shm_nattch = 0;
64314 shp->shm_file = file;
64315 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64316 return 0;
64317 }
64318
64319 +static struct ipc_ops shm_ops = {
64320 + .getnew = newseg,
64321 + .associate = shm_security,
64322 + .more_checks = shm_more_checks
64323 +};
64324 +
64325 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64326 {
64327 struct ipc_namespace *ns;
64328 - struct ipc_ops shm_ops;
64329 struct ipc_params shm_params;
64330
64331 ns = current->nsproxy->ipc_ns;
64332
64333 - shm_ops.getnew = newseg;
64334 - shm_ops.associate = shm_security;
64335 - shm_ops.more_checks = shm_more_checks;
64336 -
64337 shm_params.key = key;
64338 shm_params.flg = shmflg;
64339 shm_params.u.size = size;
64340 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64341 f_mode = FMODE_READ | FMODE_WRITE;
64342 }
64343 if (shmflg & SHM_EXEC) {
64344 +
64345 +#ifdef CONFIG_PAX_MPROTECT
64346 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
64347 + goto out;
64348 +#endif
64349 +
64350 prot |= PROT_EXEC;
64351 acc_mode |= S_IXUGO;
64352 }
64353 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64354 if (err)
64355 goto out_unlock;
64356
64357 +#ifdef CONFIG_GRKERNSEC
64358 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64359 + shp->shm_perm.cuid, shmid) ||
64360 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64361 + err = -EACCES;
64362 + goto out_unlock;
64363 + }
64364 +#endif
64365 +
64366 path = shp->shm_file->f_path;
64367 path_get(&path);
64368 shp->shm_nattch++;
64369 +#ifdef CONFIG_GRKERNSEC
64370 + shp->shm_lapid = current->pid;
64371 +#endif
64372 size = i_size_read(path.dentry->d_inode);
64373 shm_unlock(shp);
64374
64375 diff --git a/kernel/acct.c b/kernel/acct.c
64376 index fa7eb3d..7faf116 100644
64377 --- a/kernel/acct.c
64378 +++ b/kernel/acct.c
64379 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64380 */
64381 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64382 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64383 - file->f_op->write(file, (char *)&ac,
64384 + file->f_op->write(file, (char __force_user *)&ac,
64385 sizeof(acct_t), &file->f_pos);
64386 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64387 set_fs(fs);
64388 diff --git a/kernel/audit.c b/kernel/audit.c
64389 index 09fae26..ed71d5b 100644
64390 --- a/kernel/audit.c
64391 +++ b/kernel/audit.c
64392 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64393 3) suppressed due to audit_rate_limit
64394 4) suppressed due to audit_backlog_limit
64395 */
64396 -static atomic_t audit_lost = ATOMIC_INIT(0);
64397 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64398
64399 /* The netlink socket. */
64400 static struct sock *audit_sock;
64401 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64402 unsigned long now;
64403 int print;
64404
64405 - atomic_inc(&audit_lost);
64406 + atomic_inc_unchecked(&audit_lost);
64407
64408 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64409
64410 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64411 printk(KERN_WARNING
64412 "audit: audit_lost=%d audit_rate_limit=%d "
64413 "audit_backlog_limit=%d\n",
64414 - atomic_read(&audit_lost),
64415 + atomic_read_unchecked(&audit_lost),
64416 audit_rate_limit,
64417 audit_backlog_limit);
64418 audit_panic(message);
64419 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64420 status_set.pid = audit_pid;
64421 status_set.rate_limit = audit_rate_limit;
64422 status_set.backlog_limit = audit_backlog_limit;
64423 - status_set.lost = atomic_read(&audit_lost);
64424 + status_set.lost = atomic_read_unchecked(&audit_lost);
64425 status_set.backlog = skb_queue_len(&audit_skb_queue);
64426 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64427 &status_set, sizeof(status_set));
64428 @@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
64429 avail = audit_expand(ab,
64430 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
64431 if (!avail)
64432 - goto out;
64433 + goto out_va_end;
64434 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
64435 }
64436 - va_end(args2);
64437 if (len > 0)
64438 skb_put(skb, len);
64439 +out_va_end:
64440 + va_end(args2);
64441 out:
64442 return;
64443 }
64444 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64445 index 47b7fc1..c003c33 100644
64446 --- a/kernel/auditsc.c
64447 +++ b/kernel/auditsc.c
64448 @@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
64449 struct audit_buffer **ab,
64450 struct audit_aux_data_execve *axi)
64451 {
64452 - int i;
64453 - size_t len, len_sent = 0;
64454 + int i, len;
64455 + size_t len_sent = 0;
64456 const char __user *p;
64457 char *buf;
64458
64459 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64460 }
64461
64462 /* global counter which is incremented every time something logs in */
64463 -static atomic_t session_id = ATOMIC_INIT(0);
64464 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64465
64466 /**
64467 * audit_set_loginuid - set a task's audit_context loginuid
64468 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
64469 */
64470 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
64471 {
64472 - unsigned int sessionid = atomic_inc_return(&session_id);
64473 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
64474 struct audit_context *context = task->audit_context;
64475
64476 if (context && context->in_syscall) {
64477 diff --git a/kernel/capability.c b/kernel/capability.c
64478 index b463871..fa3ea1f 100644
64479 --- a/kernel/capability.c
64480 +++ b/kernel/capability.c
64481 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64482 * before modification is attempted and the application
64483 * fails.
64484 */
64485 + if (tocopy > ARRAY_SIZE(kdata))
64486 + return -EFAULT;
64487 +
64488 if (copy_to_user(dataptr, kdata, tocopy
64489 * sizeof(struct __user_cap_data_struct))) {
64490 return -EFAULT;
64491 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64492 BUG();
64493 }
64494
64495 - if (security_capable(ns, current_cred(), cap) == 0) {
64496 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
64497 current->flags |= PF_SUPERPRIV;
64498 return true;
64499 }
64500 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
64501 }
64502 EXPORT_SYMBOL(ns_capable);
64503
64504 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
64505 +{
64506 + if (unlikely(!cap_valid(cap))) {
64507 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64508 + BUG();
64509 + }
64510 +
64511 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
64512 + current->flags |= PF_SUPERPRIV;
64513 + return true;
64514 + }
64515 + return false;
64516 +}
64517 +EXPORT_SYMBOL(ns_capable_nolog);
64518 +
64519 +bool capable_nolog(int cap)
64520 +{
64521 + return ns_capable_nolog(&init_user_ns, cap);
64522 +}
64523 +EXPORT_SYMBOL(capable_nolog);
64524 +
64525 /**
64526 * task_ns_capable - Determine whether current task has a superior
64527 * capability targeted at a specific task's user namespace.
64528 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
64529 }
64530 EXPORT_SYMBOL(task_ns_capable);
64531
64532 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
64533 +{
64534 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
64535 +}
64536 +EXPORT_SYMBOL(task_ns_capable_nolog);
64537 +
64538 /**
64539 * nsown_capable - Check superior capability to one's own user_ns
64540 * @cap: The capability in question
64541 diff --git a/kernel/compat.c b/kernel/compat.c
64542 index f346ced..aa2b1f4 100644
64543 --- a/kernel/compat.c
64544 +++ b/kernel/compat.c
64545 @@ -13,6 +13,7 @@
64546
64547 #include <linux/linkage.h>
64548 #include <linux/compat.h>
64549 +#include <linux/module.h>
64550 #include <linux/errno.h>
64551 #include <linux/time.h>
64552 #include <linux/signal.h>
64553 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64554 mm_segment_t oldfs;
64555 long ret;
64556
64557 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64558 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64559 oldfs = get_fs();
64560 set_fs(KERNEL_DS);
64561 ret = hrtimer_nanosleep_restart(restart);
64562 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64563 oldfs = get_fs();
64564 set_fs(KERNEL_DS);
64565 ret = hrtimer_nanosleep(&tu,
64566 - rmtp ? (struct timespec __user *)&rmt : NULL,
64567 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
64568 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64569 set_fs(oldfs);
64570
64571 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64572 mm_segment_t old_fs = get_fs();
64573
64574 set_fs(KERNEL_DS);
64575 - ret = sys_sigpending((old_sigset_t __user *) &s);
64576 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
64577 set_fs(old_fs);
64578 if (ret == 0)
64579 ret = put_user(s, set);
64580 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
64581 old_fs = get_fs();
64582 set_fs(KERNEL_DS);
64583 ret = sys_sigprocmask(how,
64584 - set ? (old_sigset_t __user *) &s : NULL,
64585 - oset ? (old_sigset_t __user *) &s : NULL);
64586 + set ? (old_sigset_t __force_user *) &s : NULL,
64587 + oset ? (old_sigset_t __force_user *) &s : NULL);
64588 set_fs(old_fs);
64589 if (ret == 0)
64590 if (oset)
64591 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64592 mm_segment_t old_fs = get_fs();
64593
64594 set_fs(KERNEL_DS);
64595 - ret = sys_old_getrlimit(resource, &r);
64596 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64597 set_fs(old_fs);
64598
64599 if (!ret) {
64600 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64601 mm_segment_t old_fs = get_fs();
64602
64603 set_fs(KERNEL_DS);
64604 - ret = sys_getrusage(who, (struct rusage __user *) &r);
64605 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64606 set_fs(old_fs);
64607
64608 if (ret)
64609 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64610 set_fs (KERNEL_DS);
64611 ret = sys_wait4(pid,
64612 (stat_addr ?
64613 - (unsigned int __user *) &status : NULL),
64614 - options, (struct rusage __user *) &r);
64615 + (unsigned int __force_user *) &status : NULL),
64616 + options, (struct rusage __force_user *) &r);
64617 set_fs (old_fs);
64618
64619 if (ret > 0) {
64620 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64621 memset(&info, 0, sizeof(info));
64622
64623 set_fs(KERNEL_DS);
64624 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64625 - uru ? (struct rusage __user *)&ru : NULL);
64626 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64627 + uru ? (struct rusage __force_user *)&ru : NULL);
64628 set_fs(old_fs);
64629
64630 if ((ret < 0) || (info.si_signo == 0))
64631 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64632 oldfs = get_fs();
64633 set_fs(KERNEL_DS);
64634 err = sys_timer_settime(timer_id, flags,
64635 - (struct itimerspec __user *) &newts,
64636 - (struct itimerspec __user *) &oldts);
64637 + (struct itimerspec __force_user *) &newts,
64638 + (struct itimerspec __force_user *) &oldts);
64639 set_fs(oldfs);
64640 if (!err && old && put_compat_itimerspec(old, &oldts))
64641 return -EFAULT;
64642 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64643 oldfs = get_fs();
64644 set_fs(KERNEL_DS);
64645 err = sys_timer_gettime(timer_id,
64646 - (struct itimerspec __user *) &ts);
64647 + (struct itimerspec __force_user *) &ts);
64648 set_fs(oldfs);
64649 if (!err && put_compat_itimerspec(setting, &ts))
64650 return -EFAULT;
64651 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64652 oldfs = get_fs();
64653 set_fs(KERNEL_DS);
64654 err = sys_clock_settime(which_clock,
64655 - (struct timespec __user *) &ts);
64656 + (struct timespec __force_user *) &ts);
64657 set_fs(oldfs);
64658 return err;
64659 }
64660 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64661 oldfs = get_fs();
64662 set_fs(KERNEL_DS);
64663 err = sys_clock_gettime(which_clock,
64664 - (struct timespec __user *) &ts);
64665 + (struct timespec __force_user *) &ts);
64666 set_fs(oldfs);
64667 if (!err && put_compat_timespec(&ts, tp))
64668 return -EFAULT;
64669 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
64670
64671 oldfs = get_fs();
64672 set_fs(KERNEL_DS);
64673 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
64674 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
64675 set_fs(oldfs);
64676
64677 err = compat_put_timex(utp, &txc);
64678 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
64679 oldfs = get_fs();
64680 set_fs(KERNEL_DS);
64681 err = sys_clock_getres(which_clock,
64682 - (struct timespec __user *) &ts);
64683 + (struct timespec __force_user *) &ts);
64684 set_fs(oldfs);
64685 if (!err && tp && put_compat_timespec(&ts, tp))
64686 return -EFAULT;
64687 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
64688 long err;
64689 mm_segment_t oldfs;
64690 struct timespec tu;
64691 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
64692 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
64693
64694 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
64695 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
64696 oldfs = get_fs();
64697 set_fs(KERNEL_DS);
64698 err = clock_nanosleep_restart(restart);
64699 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
64700 oldfs = get_fs();
64701 set_fs(KERNEL_DS);
64702 err = sys_clock_nanosleep(which_clock, flags,
64703 - (struct timespec __user *) &in,
64704 - (struct timespec __user *) &out);
64705 + (struct timespec __force_user *) &in,
64706 + (struct timespec __force_user *) &out);
64707 set_fs(oldfs);
64708
64709 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
64710 diff --git a/kernel/configs.c b/kernel/configs.c
64711 index 42e8fa0..9e7406b 100644
64712 --- a/kernel/configs.c
64713 +++ b/kernel/configs.c
64714 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
64715 struct proc_dir_entry *entry;
64716
64717 /* create the current config file */
64718 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
64719 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
64720 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
64721 + &ikconfig_file_ops);
64722 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64723 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
64724 + &ikconfig_file_ops);
64725 +#endif
64726 +#else
64727 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
64728 &ikconfig_file_ops);
64729 +#endif
64730 +
64731 if (!entry)
64732 return -ENOMEM;
64733
64734 diff --git a/kernel/cred.c b/kernel/cred.c
64735 index 5791612..a3c04dc 100644
64736 --- a/kernel/cred.c
64737 +++ b/kernel/cred.c
64738 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
64739 validate_creds(cred);
64740 put_cred(cred);
64741 }
64742 +
64743 +#ifdef CONFIG_GRKERNSEC_SETXID
64744 + cred = (struct cred *) tsk->delayed_cred;
64745 + if (cred) {
64746 + tsk->delayed_cred = NULL;
64747 + validate_creds(cred);
64748 + put_cred(cred);
64749 + }
64750 +#endif
64751 }
64752
64753 /**
64754 @@ -470,7 +479,7 @@ error_put:
64755 * Always returns 0 thus allowing this function to be tail-called at the end
64756 * of, say, sys_setgid().
64757 */
64758 -int commit_creds(struct cred *new)
64759 +static int __commit_creds(struct cred *new)
64760 {
64761 struct task_struct *task = current;
64762 const struct cred *old = task->real_cred;
64763 @@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
64764
64765 get_cred(new); /* we will require a ref for the subj creds too */
64766
64767 + gr_set_role_label(task, new->uid, new->gid);
64768 +
64769 /* dumpability changes */
64770 if (old->euid != new->euid ||
64771 old->egid != new->egid ||
64772 @@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
64773 put_cred(old);
64774 return 0;
64775 }
64776 +#ifdef CONFIG_GRKERNSEC_SETXID
64777 +extern int set_user(struct cred *new);
64778 +
64779 +void gr_delayed_cred_worker(void)
64780 +{
64781 + const struct cred *new = current->delayed_cred;
64782 + struct cred *ncred;
64783 +
64784 + current->delayed_cred = NULL;
64785 +
64786 + if (current_uid() && new != NULL) {
64787 + // from doing get_cred on it when queueing this
64788 + put_cred(new);
64789 + return;
64790 + } else if (new == NULL)
64791 + return;
64792 +
64793 + ncred = prepare_creds();
64794 + if (!ncred)
64795 + goto die;
64796 + // uids
64797 + ncred->uid = new->uid;
64798 + ncred->euid = new->euid;
64799 + ncred->suid = new->suid;
64800 + ncred->fsuid = new->fsuid;
64801 + // gids
64802 + ncred->gid = new->gid;
64803 + ncred->egid = new->egid;
64804 + ncred->sgid = new->sgid;
64805 + ncred->fsgid = new->fsgid;
64806 + // groups
64807 + if (set_groups(ncred, new->group_info) < 0) {
64808 + abort_creds(ncred);
64809 + goto die;
64810 + }
64811 + // caps
64812 + ncred->securebits = new->securebits;
64813 + ncred->cap_inheritable = new->cap_inheritable;
64814 + ncred->cap_permitted = new->cap_permitted;
64815 + ncred->cap_effective = new->cap_effective;
64816 + ncred->cap_bset = new->cap_bset;
64817 +
64818 + if (set_user(ncred)) {
64819 + abort_creds(ncred);
64820 + goto die;
64821 + }
64822 +
64823 + // from doing get_cred on it when queueing this
64824 + put_cred(new);
64825 +
64826 + __commit_creds(ncred);
64827 + return;
64828 +die:
64829 + // from doing get_cred on it when queueing this
64830 + put_cred(new);
64831 + do_group_exit(SIGKILL);
64832 +}
64833 +#endif
64834 +
64835 +int commit_creds(struct cred *new)
64836 +{
64837 +#ifdef CONFIG_GRKERNSEC_SETXID
64838 + struct task_struct *t;
64839 +
64840 + /* we won't get called with tasklist_lock held for writing
64841 + and interrupts disabled as the cred struct in that case is
64842 + init_cred
64843 + */
64844 + if (grsec_enable_setxid && !current_is_single_threaded() &&
64845 + !current_uid() && new->uid) {
64846 + rcu_read_lock();
64847 + read_lock(&tasklist_lock);
64848 + for (t = next_thread(current); t != current;
64849 + t = next_thread(t)) {
64850 + if (t->delayed_cred == NULL) {
64851 + t->delayed_cred = get_cred(new);
64852 + set_tsk_need_resched(t);
64853 + }
64854 + }
64855 + read_unlock(&tasklist_lock);
64856 + rcu_read_unlock();
64857 + }
64858 +#endif
64859 + return __commit_creds(new);
64860 +}
64861 +
64862 EXPORT_SYMBOL(commit_creds);
64863
64864 /**
64865 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
64866 index 0d7c087..01b8cef 100644
64867 --- a/kernel/debug/debug_core.c
64868 +++ b/kernel/debug/debug_core.c
64869 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
64870 */
64871 static atomic_t masters_in_kgdb;
64872 static atomic_t slaves_in_kgdb;
64873 -static atomic_t kgdb_break_tasklet_var;
64874 +static atomic_unchecked_t kgdb_break_tasklet_var;
64875 atomic_t kgdb_setting_breakpoint;
64876
64877 struct task_struct *kgdb_usethread;
64878 @@ -129,7 +129,7 @@ int kgdb_single_step;
64879 static pid_t kgdb_sstep_pid;
64880
64881 /* to keep track of the CPU which is doing the single stepping*/
64882 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64883 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
64884
64885 /*
64886 * If you are debugging a problem where roundup (the collection of
64887 @@ -542,7 +542,7 @@ return_normal:
64888 * kernel will only try for the value of sstep_tries before
64889 * giving up and continuing on.
64890 */
64891 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
64892 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
64893 (kgdb_info[cpu].task &&
64894 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
64895 atomic_set(&kgdb_active, -1);
64896 @@ -636,8 +636,8 @@ cpu_master_loop:
64897 }
64898
64899 kgdb_restore:
64900 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
64901 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
64902 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
64903 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
64904 if (kgdb_info[sstep_cpu].task)
64905 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
64906 else
64907 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
64908 static void kgdb_tasklet_bpt(unsigned long ing)
64909 {
64910 kgdb_breakpoint();
64911 - atomic_set(&kgdb_break_tasklet_var, 0);
64912 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
64913 }
64914
64915 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
64916
64917 void kgdb_schedule_breakpoint(void)
64918 {
64919 - if (atomic_read(&kgdb_break_tasklet_var) ||
64920 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
64921 atomic_read(&kgdb_active) != -1 ||
64922 atomic_read(&kgdb_setting_breakpoint))
64923 return;
64924 - atomic_inc(&kgdb_break_tasklet_var);
64925 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
64926 tasklet_schedule(&kgdb_tasklet_breakpoint);
64927 }
64928 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
64929 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
64930 index 63786e7..0780cac 100644
64931 --- a/kernel/debug/kdb/kdb_main.c
64932 +++ b/kernel/debug/kdb/kdb_main.c
64933 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
64934 list_for_each_entry(mod, kdb_modules, list) {
64935
64936 kdb_printf("%-20s%8u 0x%p ", mod->name,
64937 - mod->core_size, (void *)mod);
64938 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
64939 #ifdef CONFIG_MODULE_UNLOAD
64940 kdb_printf("%4d ", module_refcount(mod));
64941 #endif
64942 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
64943 kdb_printf(" (Loading)");
64944 else
64945 kdb_printf(" (Live)");
64946 - kdb_printf(" 0x%p", mod->module_core);
64947 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64948
64949 #ifdef CONFIG_MODULE_UNLOAD
64950 {
64951 diff --git a/kernel/events/core.c b/kernel/events/core.c
64952 index 58690af..d903d75 100644
64953 --- a/kernel/events/core.c
64954 +++ b/kernel/events/core.c
64955 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
64956 return 0;
64957 }
64958
64959 -static atomic64_t perf_event_id;
64960 +static atomic64_unchecked_t perf_event_id;
64961
64962 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
64963 enum event_type_t event_type);
64964 @@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
64965
64966 static inline u64 perf_event_count(struct perf_event *event)
64967 {
64968 - return local64_read(&event->count) + atomic64_read(&event->child_count);
64969 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
64970 }
64971
64972 static u64 perf_event_read(struct perf_event *event)
64973 @@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
64974 mutex_lock(&event->child_mutex);
64975 total += perf_event_read(event);
64976 *enabled += event->total_time_enabled +
64977 - atomic64_read(&event->child_total_time_enabled);
64978 + atomic64_read_unchecked(&event->child_total_time_enabled);
64979 *running += event->total_time_running +
64980 - atomic64_read(&event->child_total_time_running);
64981 + atomic64_read_unchecked(&event->child_total_time_running);
64982
64983 list_for_each_entry(child, &event->child_list, child_list) {
64984 total += perf_event_read(child);
64985 @@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
64986 userpg->offset -= local64_read(&event->hw.prev_count);
64987
64988 userpg->time_enabled = enabled +
64989 - atomic64_read(&event->child_total_time_enabled);
64990 + atomic64_read_unchecked(&event->child_total_time_enabled);
64991
64992 userpg->time_running = running +
64993 - atomic64_read(&event->child_total_time_running);
64994 + atomic64_read_unchecked(&event->child_total_time_running);
64995
64996 barrier();
64997 ++userpg->lock;
64998 @@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
64999 values[n++] = perf_event_count(event);
65000 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65001 values[n++] = enabled +
65002 - atomic64_read(&event->child_total_time_enabled);
65003 + atomic64_read_unchecked(&event->child_total_time_enabled);
65004 }
65005 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65006 values[n++] = running +
65007 - atomic64_read(&event->child_total_time_running);
65008 + atomic64_read_unchecked(&event->child_total_time_running);
65009 }
65010 if (read_format & PERF_FORMAT_ID)
65011 values[n++] = primary_event_id(event);
65012 @@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65013 * need to add enough zero bytes after the string to handle
65014 * the 64bit alignment we do later.
65015 */
65016 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65017 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65018 if (!buf) {
65019 name = strncpy(tmp, "//enomem", sizeof(tmp));
65020 goto got_name;
65021 }
65022 - name = d_path(&file->f_path, buf, PATH_MAX);
65023 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65024 if (IS_ERR(name)) {
65025 name = strncpy(tmp, "//toolong", sizeof(tmp));
65026 goto got_name;
65027 @@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65028 event->parent = parent_event;
65029
65030 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65031 - event->id = atomic64_inc_return(&perf_event_id);
65032 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65033
65034 event->state = PERF_EVENT_STATE_INACTIVE;
65035
65036 @@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
65037 /*
65038 * Add back the child's count to the parent's count:
65039 */
65040 - atomic64_add(child_val, &parent_event->child_count);
65041 - atomic64_add(child_event->total_time_enabled,
65042 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65043 + atomic64_add_unchecked(child_event->total_time_enabled,
65044 &parent_event->child_total_time_enabled);
65045 - atomic64_add(child_event->total_time_running,
65046 + atomic64_add_unchecked(child_event->total_time_running,
65047 &parent_event->child_total_time_running);
65048
65049 /*
65050 diff --git a/kernel/exit.c b/kernel/exit.c
65051 index e6e01b9..0a21b0a 100644
65052 --- a/kernel/exit.c
65053 +++ b/kernel/exit.c
65054 @@ -57,6 +57,10 @@
65055 #include <asm/pgtable.h>
65056 #include <asm/mmu_context.h>
65057
65058 +#ifdef CONFIG_GRKERNSEC
65059 +extern rwlock_t grsec_exec_file_lock;
65060 +#endif
65061 +
65062 static void exit_mm(struct task_struct * tsk);
65063
65064 static void __unhash_process(struct task_struct *p, bool group_dead)
65065 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
65066 struct task_struct *leader;
65067 int zap_leader;
65068 repeat:
65069 +#ifdef CONFIG_NET
65070 + gr_del_task_from_ip_table(p);
65071 +#endif
65072 +
65073 /* don't need to get the RCU readlock here - the process is dead and
65074 * can't be modifying its own credentials. But shut RCU-lockdep up */
65075 rcu_read_lock();
65076 @@ -380,7 +388,7 @@ int allow_signal(int sig)
65077 * know it'll be handled, so that they don't get converted to
65078 * SIGKILL or just silently dropped.
65079 */
65080 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65081 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65082 recalc_sigpending();
65083 spin_unlock_irq(&current->sighand->siglock);
65084 return 0;
65085 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
65086 vsnprintf(current->comm, sizeof(current->comm), name, args);
65087 va_end(args);
65088
65089 +#ifdef CONFIG_GRKERNSEC
65090 + write_lock(&grsec_exec_file_lock);
65091 + if (current->exec_file) {
65092 + fput(current->exec_file);
65093 + current->exec_file = NULL;
65094 + }
65095 + write_unlock(&grsec_exec_file_lock);
65096 +#endif
65097 +
65098 + gr_set_kernel_label(current);
65099 +
65100 /*
65101 * If we were started as result of loading a module, close all of the
65102 * user space pages. We don't need them, and if we didn't close them
65103 @@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
65104 struct task_struct *tsk = current;
65105 int group_dead;
65106
65107 + set_fs(USER_DS);
65108 +
65109 profile_task_exit(tsk);
65110
65111 WARN_ON(blk_needs_flush_plug(tsk));
65112 @@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
65113 * mm_release()->clear_child_tid() from writing to a user-controlled
65114 * kernel address.
65115 */
65116 - set_fs(USER_DS);
65117
65118 ptrace_event(PTRACE_EVENT_EXIT, code);
65119
65120 @@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
65121 tsk->exit_code = code;
65122 taskstats_exit(tsk, group_dead);
65123
65124 + gr_acl_handle_psacct(tsk, code);
65125 + gr_acl_handle_exit();
65126 +
65127 exit_mm(tsk);
65128
65129 if (group_dead)
65130 @@ -1068,7 +1091,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65131 * Take down every thread in the group. This is called by fatal signals
65132 * as well as by sys_exit_group (below).
65133 */
65134 -NORET_TYPE void
65135 +__noreturn void
65136 do_group_exit(int exit_code)
65137 {
65138 struct signal_struct *sig = current->signal;
65139 diff --git a/kernel/fork.c b/kernel/fork.c
65140 index 0acf42c0..9e40e2e 100644
65141 --- a/kernel/fork.c
65142 +++ b/kernel/fork.c
65143 @@ -281,7 +281,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65144 *stackend = STACK_END_MAGIC; /* for overflow detection */
65145
65146 #ifdef CONFIG_CC_STACKPROTECTOR
65147 - tsk->stack_canary = get_random_int();
65148 + tsk->stack_canary = pax_get_random_long();
65149 #endif
65150
65151 /*
65152 @@ -305,13 +305,77 @@ out:
65153 }
65154
65155 #ifdef CONFIG_MMU
65156 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
65157 +{
65158 + struct vm_area_struct *tmp;
65159 + unsigned long charge;
65160 + struct mempolicy *pol;
65161 + struct file *file;
65162 +
65163 + charge = 0;
65164 + if (mpnt->vm_flags & VM_ACCOUNT) {
65165 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65166 + if (security_vm_enough_memory(len))
65167 + goto fail_nomem;
65168 + charge = len;
65169 + }
65170 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65171 + if (!tmp)
65172 + goto fail_nomem;
65173 + *tmp = *mpnt;
65174 + tmp->vm_mm = mm;
65175 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65176 + pol = mpol_dup(vma_policy(mpnt));
65177 + if (IS_ERR(pol))
65178 + goto fail_nomem_policy;
65179 + vma_set_policy(tmp, pol);
65180 + if (anon_vma_fork(tmp, mpnt))
65181 + goto fail_nomem_anon_vma_fork;
65182 + tmp->vm_flags &= ~VM_LOCKED;
65183 + tmp->vm_next = tmp->vm_prev = NULL;
65184 + tmp->vm_mirror = NULL;
65185 + file = tmp->vm_file;
65186 + if (file) {
65187 + struct inode *inode = file->f_path.dentry->d_inode;
65188 + struct address_space *mapping = file->f_mapping;
65189 +
65190 + get_file(file);
65191 + if (tmp->vm_flags & VM_DENYWRITE)
65192 + atomic_dec(&inode->i_writecount);
65193 + mutex_lock(&mapping->i_mmap_mutex);
65194 + if (tmp->vm_flags & VM_SHARED)
65195 + mapping->i_mmap_writable++;
65196 + flush_dcache_mmap_lock(mapping);
65197 + /* insert tmp into the share list, just after mpnt */
65198 + vma_prio_tree_add(tmp, mpnt);
65199 + flush_dcache_mmap_unlock(mapping);
65200 + mutex_unlock(&mapping->i_mmap_mutex);
65201 + }
65202 +
65203 + /*
65204 + * Clear hugetlb-related page reserves for children. This only
65205 + * affects MAP_PRIVATE mappings. Faults generated by the child
65206 + * are not guaranteed to succeed, even if read-only
65207 + */
65208 + if (is_vm_hugetlb_page(tmp))
65209 + reset_vma_resv_huge_pages(tmp);
65210 +
65211 + return tmp;
65212 +
65213 +fail_nomem_anon_vma_fork:
65214 + mpol_put(pol);
65215 +fail_nomem_policy:
65216 + kmem_cache_free(vm_area_cachep, tmp);
65217 +fail_nomem:
65218 + vm_unacct_memory(charge);
65219 + return NULL;
65220 +}
65221 +
65222 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65223 {
65224 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65225 struct rb_node **rb_link, *rb_parent;
65226 int retval;
65227 - unsigned long charge;
65228 - struct mempolicy *pol;
65229
65230 down_write(&oldmm->mmap_sem);
65231 flush_cache_dup_mm(oldmm);
65232 @@ -323,8 +387,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65233 mm->locked_vm = 0;
65234 mm->mmap = NULL;
65235 mm->mmap_cache = NULL;
65236 - mm->free_area_cache = oldmm->mmap_base;
65237 - mm->cached_hole_size = ~0UL;
65238 + mm->free_area_cache = oldmm->free_area_cache;
65239 + mm->cached_hole_size = oldmm->cached_hole_size;
65240 mm->map_count = 0;
65241 cpumask_clear(mm_cpumask(mm));
65242 mm->mm_rb = RB_ROOT;
65243 @@ -340,8 +404,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65244
65245 prev = NULL;
65246 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65247 - struct file *file;
65248 -
65249 if (mpnt->vm_flags & VM_DONTCOPY) {
65250 long pages = vma_pages(mpnt);
65251 mm->total_vm -= pages;
65252 @@ -349,53 +411,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65253 -pages);
65254 continue;
65255 }
65256 - charge = 0;
65257 - if (mpnt->vm_flags & VM_ACCOUNT) {
65258 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65259 - if (security_vm_enough_memory(len))
65260 - goto fail_nomem;
65261 - charge = len;
65262 + tmp = dup_vma(mm, mpnt);
65263 + if (!tmp) {
65264 + retval = -ENOMEM;
65265 + goto out;
65266 }
65267 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65268 - if (!tmp)
65269 - goto fail_nomem;
65270 - *tmp = *mpnt;
65271 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65272 - pol = mpol_dup(vma_policy(mpnt));
65273 - retval = PTR_ERR(pol);
65274 - if (IS_ERR(pol))
65275 - goto fail_nomem_policy;
65276 - vma_set_policy(tmp, pol);
65277 - tmp->vm_mm = mm;
65278 - if (anon_vma_fork(tmp, mpnt))
65279 - goto fail_nomem_anon_vma_fork;
65280 - tmp->vm_flags &= ~VM_LOCKED;
65281 - tmp->vm_next = tmp->vm_prev = NULL;
65282 - file = tmp->vm_file;
65283 - if (file) {
65284 - struct inode *inode = file->f_path.dentry->d_inode;
65285 - struct address_space *mapping = file->f_mapping;
65286 -
65287 - get_file(file);
65288 - if (tmp->vm_flags & VM_DENYWRITE)
65289 - atomic_dec(&inode->i_writecount);
65290 - mutex_lock(&mapping->i_mmap_mutex);
65291 - if (tmp->vm_flags & VM_SHARED)
65292 - mapping->i_mmap_writable++;
65293 - flush_dcache_mmap_lock(mapping);
65294 - /* insert tmp into the share list, just after mpnt */
65295 - vma_prio_tree_add(tmp, mpnt);
65296 - flush_dcache_mmap_unlock(mapping);
65297 - mutex_unlock(&mapping->i_mmap_mutex);
65298 - }
65299 -
65300 - /*
65301 - * Clear hugetlb-related page reserves for children. This only
65302 - * affects MAP_PRIVATE mappings. Faults generated by the child
65303 - * are not guaranteed to succeed, even if read-only
65304 - */
65305 - if (is_vm_hugetlb_page(tmp))
65306 - reset_vma_resv_huge_pages(tmp);
65307
65308 /*
65309 * Link in the new vma and copy the page table entries.
65310 @@ -418,6 +438,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65311 if (retval)
65312 goto out;
65313 }
65314 +
65315 +#ifdef CONFIG_PAX_SEGMEXEC
65316 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65317 + struct vm_area_struct *mpnt_m;
65318 +
65319 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65320 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65321 +
65322 + if (!mpnt->vm_mirror)
65323 + continue;
65324 +
65325 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65326 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65327 + mpnt->vm_mirror = mpnt_m;
65328 + } else {
65329 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65330 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65331 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65332 + mpnt->vm_mirror->vm_mirror = mpnt;
65333 + }
65334 + }
65335 + BUG_ON(mpnt_m);
65336 + }
65337 +#endif
65338 +
65339 /* a new mm has just been created */
65340 arch_dup_mmap(oldmm, mm);
65341 retval = 0;
65342 @@ -426,14 +471,6 @@ out:
65343 flush_tlb_mm(oldmm);
65344 up_write(&oldmm->mmap_sem);
65345 return retval;
65346 -fail_nomem_anon_vma_fork:
65347 - mpol_put(pol);
65348 -fail_nomem_policy:
65349 - kmem_cache_free(vm_area_cachep, tmp);
65350 -fail_nomem:
65351 - retval = -ENOMEM;
65352 - vm_unacct_memory(charge);
65353 - goto out;
65354 }
65355
65356 static inline int mm_alloc_pgd(struct mm_struct *mm)
65357 @@ -645,6 +682,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
65358 }
65359 EXPORT_SYMBOL_GPL(get_task_mm);
65360
65361 +struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65362 +{
65363 + struct mm_struct *mm;
65364 + int err;
65365 +
65366 + err = mutex_lock_killable(&task->signal->cred_guard_mutex);
65367 + if (err)
65368 + return ERR_PTR(err);
65369 +
65370 + mm = get_task_mm(task);
65371 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65372 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65373 + mmput(mm);
65374 + mm = ERR_PTR(-EACCES);
65375 + }
65376 + mutex_unlock(&task->signal->cred_guard_mutex);
65377 +
65378 + return mm;
65379 +}
65380 +
65381 /* Please note the differences between mmput and mm_release.
65382 * mmput is called whenever we stop holding onto a mm_struct,
65383 * error success whatever.
65384 @@ -830,13 +887,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65385 spin_unlock(&fs->lock);
65386 return -EAGAIN;
65387 }
65388 - fs->users++;
65389 + atomic_inc(&fs->users);
65390 spin_unlock(&fs->lock);
65391 return 0;
65392 }
65393 tsk->fs = copy_fs_struct(fs);
65394 if (!tsk->fs)
65395 return -ENOMEM;
65396 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65397 return 0;
65398 }
65399
65400 @@ -1100,6 +1158,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65401 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65402 #endif
65403 retval = -EAGAIN;
65404 +
65405 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65406 +
65407 if (atomic_read(&p->real_cred->user->processes) >=
65408 task_rlimit(p, RLIMIT_NPROC)) {
65409 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65410 @@ -1259,6 +1320,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65411 if (clone_flags & CLONE_THREAD)
65412 p->tgid = current->tgid;
65413
65414 + gr_copy_label(p);
65415 +
65416 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65417 /*
65418 * Clear TID on mm_release()?
65419 @@ -1421,6 +1484,8 @@ bad_fork_cleanup_count:
65420 bad_fork_free:
65421 free_task(p);
65422 fork_out:
65423 + gr_log_forkfail(retval);
65424 +
65425 return ERR_PTR(retval);
65426 }
65427
65428 @@ -1521,6 +1586,8 @@ long do_fork(unsigned long clone_flags,
65429 if (clone_flags & CLONE_PARENT_SETTID)
65430 put_user(nr, parent_tidptr);
65431
65432 + gr_handle_brute_check();
65433 +
65434 if (clone_flags & CLONE_VFORK) {
65435 p->vfork_done = &vfork;
65436 init_completion(&vfork);
65437 @@ -1630,7 +1697,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65438 return 0;
65439
65440 /* don't need lock here; in the worst case we'll do useless copy */
65441 - if (fs->users == 1)
65442 + if (atomic_read(&fs->users) == 1)
65443 return 0;
65444
65445 *new_fsp = copy_fs_struct(fs);
65446 @@ -1719,7 +1786,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65447 fs = current->fs;
65448 spin_lock(&fs->lock);
65449 current->fs = new_fs;
65450 - if (--fs->users)
65451 + gr_set_chroot_entries(current, &current->fs->root);
65452 + if (atomic_dec_return(&fs->users))
65453 new_fs = NULL;
65454 else
65455 new_fs = fs;
65456 diff --git a/kernel/futex.c b/kernel/futex.c
65457 index 1614be2..37abc7e 100644
65458 --- a/kernel/futex.c
65459 +++ b/kernel/futex.c
65460 @@ -54,6 +54,7 @@
65461 #include <linux/mount.h>
65462 #include <linux/pagemap.h>
65463 #include <linux/syscalls.h>
65464 +#include <linux/ptrace.h>
65465 #include <linux/signal.h>
65466 #include <linux/export.h>
65467 #include <linux/magic.h>
65468 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65469 struct page *page, *page_head;
65470 int err, ro = 0;
65471
65472 +#ifdef CONFIG_PAX_SEGMEXEC
65473 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65474 + return -EFAULT;
65475 +#endif
65476 +
65477 /*
65478 * The futex address must be "naturally" aligned.
65479 */
65480 @@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65481 if (!p)
65482 goto err_unlock;
65483 ret = -EPERM;
65484 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65485 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65486 + goto err_unlock;
65487 +#endif
65488 pcred = __task_cred(p);
65489 /* If victim is in different user_ns, then uids are not
65490 comparable, so we must have CAP_SYS_PTRACE */
65491 @@ -2724,6 +2734,7 @@ static int __init futex_init(void)
65492 {
65493 u32 curval;
65494 int i;
65495 + mm_segment_t oldfs;
65496
65497 /*
65498 * This will fail and we want it. Some arch implementations do
65499 @@ -2735,8 +2746,11 @@ static int __init futex_init(void)
65500 * implementation, the non-functional ones will return
65501 * -ENOSYS.
65502 */
65503 + oldfs = get_fs();
65504 + set_fs(USER_DS);
65505 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65506 futex_cmpxchg_enabled = 1;
65507 + set_fs(oldfs);
65508
65509 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65510 plist_head_init(&futex_queues[i].chain);
65511 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
65512 index 5f9e689..582d46d 100644
65513 --- a/kernel/futex_compat.c
65514 +++ b/kernel/futex_compat.c
65515 @@ -10,6 +10,7 @@
65516 #include <linux/compat.h>
65517 #include <linux/nsproxy.h>
65518 #include <linux/futex.h>
65519 +#include <linux/ptrace.h>
65520
65521 #include <asm/uaccess.h>
65522
65523 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65524 {
65525 struct compat_robust_list_head __user *head;
65526 unsigned long ret;
65527 - const struct cred *cred = current_cred(), *pcred;
65528 + const struct cred *cred = current_cred();
65529 + const struct cred *pcred;
65530
65531 if (!futex_cmpxchg_enabled)
65532 return -ENOSYS;
65533 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65534 if (!p)
65535 goto err_unlock;
65536 ret = -EPERM;
65537 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65538 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65539 + goto err_unlock;
65540 +#endif
65541 pcred = __task_cred(p);
65542 /* If victim is in different user_ns, then uids are not
65543 comparable, so we must have CAP_SYS_PTRACE */
65544 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65545 index 9b22d03..6295b62 100644
65546 --- a/kernel/gcov/base.c
65547 +++ b/kernel/gcov/base.c
65548 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
65549 }
65550
65551 #ifdef CONFIG_MODULES
65552 -static inline int within(void *addr, void *start, unsigned long size)
65553 -{
65554 - return ((addr >= start) && (addr < start + size));
65555 -}
65556 -
65557 /* Update list and generate events when modules are unloaded. */
65558 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65559 void *data)
65560 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65561 prev = NULL;
65562 /* Remove entries located in module from linked list. */
65563 for (info = gcov_info_head; info; info = info->next) {
65564 - if (within(info, mod->module_core, mod->core_size)) {
65565 + if (within_module_core_rw((unsigned long)info, mod)) {
65566 if (prev)
65567 prev->next = info->next;
65568 else
65569 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65570 index ae34bf5..4e2f3d0 100644
65571 --- a/kernel/hrtimer.c
65572 +++ b/kernel/hrtimer.c
65573 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
65574 local_irq_restore(flags);
65575 }
65576
65577 -static void run_hrtimer_softirq(struct softirq_action *h)
65578 +static void run_hrtimer_softirq(void)
65579 {
65580 hrtimer_peek_ahead_timers();
65581 }
65582 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65583 index 66ff710..05a5128 100644
65584 --- a/kernel/jump_label.c
65585 +++ b/kernel/jump_label.c
65586 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65587
65588 size = (((unsigned long)stop - (unsigned long)start)
65589 / sizeof(struct jump_entry));
65590 + pax_open_kernel();
65591 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65592 + pax_close_kernel();
65593 }
65594
65595 static void jump_label_update(struct jump_label_key *key, int enable);
65596 @@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65597 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65598 struct jump_entry *iter;
65599
65600 + pax_open_kernel();
65601 for (iter = iter_start; iter < iter_stop; iter++) {
65602 if (within_module_init(iter->code, mod))
65603 iter->code = 0;
65604 }
65605 + pax_close_kernel();
65606 }
65607
65608 static int
65609 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65610 index 079f1d3..a407562 100644
65611 --- a/kernel/kallsyms.c
65612 +++ b/kernel/kallsyms.c
65613 @@ -11,6 +11,9 @@
65614 * Changed the compression method from stem compression to "table lookup"
65615 * compression (see scripts/kallsyms.c for a more complete description)
65616 */
65617 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65618 +#define __INCLUDED_BY_HIDESYM 1
65619 +#endif
65620 #include <linux/kallsyms.h>
65621 #include <linux/module.h>
65622 #include <linux/init.h>
65623 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
65624
65625 static inline int is_kernel_inittext(unsigned long addr)
65626 {
65627 + if (system_state != SYSTEM_BOOTING)
65628 + return 0;
65629 +
65630 if (addr >= (unsigned long)_sinittext
65631 && addr <= (unsigned long)_einittext)
65632 return 1;
65633 return 0;
65634 }
65635
65636 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65637 +#ifdef CONFIG_MODULES
65638 +static inline int is_module_text(unsigned long addr)
65639 +{
65640 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
65641 + return 1;
65642 +
65643 + addr = ktla_ktva(addr);
65644 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
65645 +}
65646 +#else
65647 +static inline int is_module_text(unsigned long addr)
65648 +{
65649 + return 0;
65650 +}
65651 +#endif
65652 +#endif
65653 +
65654 static inline int is_kernel_text(unsigned long addr)
65655 {
65656 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
65657 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
65658
65659 static inline int is_kernel(unsigned long addr)
65660 {
65661 +
65662 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65663 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
65664 + return 1;
65665 +
65666 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
65667 +#else
65668 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
65669 +#endif
65670 +
65671 return 1;
65672 return in_gate_area_no_mm(addr);
65673 }
65674
65675 static int is_ksym_addr(unsigned long addr)
65676 {
65677 +
65678 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
65679 + if (is_module_text(addr))
65680 + return 0;
65681 +#endif
65682 +
65683 if (all_var)
65684 return is_kernel(addr);
65685
65686 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
65687
65688 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
65689 {
65690 - iter->name[0] = '\0';
65691 iter->nameoff = get_symbol_offset(new_pos);
65692 iter->pos = new_pos;
65693 }
65694 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
65695 {
65696 struct kallsym_iter *iter = m->private;
65697
65698 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65699 + if (current_uid())
65700 + return 0;
65701 +#endif
65702 +
65703 /* Some debugging symbols have no name. Ignore them. */
65704 if (!iter->name[0])
65705 return 0;
65706 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
65707 struct kallsym_iter *iter;
65708 int ret;
65709
65710 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
65711 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
65712 if (!iter)
65713 return -ENOMEM;
65714 reset_iter(iter, 0);
65715 diff --git a/kernel/kexec.c b/kernel/kexec.c
65716 index dc7bc08..4601964 100644
65717 --- a/kernel/kexec.c
65718 +++ b/kernel/kexec.c
65719 @@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
65720 unsigned long flags)
65721 {
65722 struct compat_kexec_segment in;
65723 - struct kexec_segment out, __user *ksegments;
65724 + struct kexec_segment out;
65725 + struct kexec_segment __user *ksegments;
65726 unsigned long i, result;
65727
65728 /* Don't allow clients that don't understand the native
65729 diff --git a/kernel/kmod.c b/kernel/kmod.c
65730 index a4bea97..7a1ae9a 100644
65731 --- a/kernel/kmod.c
65732 +++ b/kernel/kmod.c
65733 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
65734 * If module auto-loading support is disabled then this function
65735 * becomes a no-operation.
65736 */
65737 -int __request_module(bool wait, const char *fmt, ...)
65738 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
65739 {
65740 - va_list args;
65741 char module_name[MODULE_NAME_LEN];
65742 unsigned int max_modprobes;
65743 int ret;
65744 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
65745 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
65746 static char *envp[] = { "HOME=/",
65747 "TERM=linux",
65748 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
65749 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
65750 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
65751 static int kmod_loop_msg;
65752
65753 - va_start(args, fmt);
65754 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
65755 - va_end(args);
65756 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
65757 if (ret >= MODULE_NAME_LEN)
65758 return -ENAMETOOLONG;
65759
65760 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
65761 if (ret)
65762 return ret;
65763
65764 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65765 + if (!current_uid()) {
65766 + /* hack to workaround consolekit/udisks stupidity */
65767 + read_lock(&tasklist_lock);
65768 + if (!strcmp(current->comm, "mount") &&
65769 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
65770 + read_unlock(&tasklist_lock);
65771 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
65772 + return -EPERM;
65773 + }
65774 + read_unlock(&tasklist_lock);
65775 + }
65776 +#endif
65777 +
65778 /* If modprobe needs a service that is in a module, we get a recursive
65779 * loop. Limit the number of running kmod threads to max_threads/2 or
65780 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
65781 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
65782 atomic_dec(&kmod_concurrent);
65783 return ret;
65784 }
65785 +
65786 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
65787 +{
65788 + va_list args;
65789 + int ret;
65790 +
65791 + va_start(args, fmt);
65792 + ret = ____request_module(wait, module_param, fmt, args);
65793 + va_end(args);
65794 +
65795 + return ret;
65796 +}
65797 +
65798 +int __request_module(bool wait, const char *fmt, ...)
65799 +{
65800 + va_list args;
65801 + int ret;
65802 +
65803 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65804 + if (current_uid()) {
65805 + char module_param[MODULE_NAME_LEN];
65806 +
65807 + memset(module_param, 0, sizeof(module_param));
65808 +
65809 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
65810 +
65811 + va_start(args, fmt);
65812 + ret = ____request_module(wait, module_param, fmt, args);
65813 + va_end(args);
65814 +
65815 + return ret;
65816 + }
65817 +#endif
65818 +
65819 + va_start(args, fmt);
65820 + ret = ____request_module(wait, NULL, fmt, args);
65821 + va_end(args);
65822 +
65823 + return ret;
65824 +}
65825 +
65826 EXPORT_SYMBOL(__request_module);
65827 #endif /* CONFIG_MODULES */
65828
65829 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
65830 *
65831 * Thus the __user pointer cast is valid here.
65832 */
65833 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
65834 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
65835
65836 /*
65837 * If ret is 0, either ____call_usermodehelper failed and the
65838 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
65839 index bc90b87..43c7d8c 100644
65840 --- a/kernel/kprobes.c
65841 +++ b/kernel/kprobes.c
65842 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
65843 * kernel image and loaded module images reside. This is required
65844 * so x86_64 can correctly handle the %rip-relative fixups.
65845 */
65846 - kip->insns = module_alloc(PAGE_SIZE);
65847 + kip->insns = module_alloc_exec(PAGE_SIZE);
65848 if (!kip->insns) {
65849 kfree(kip);
65850 return NULL;
65851 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
65852 */
65853 if (!list_is_singular(&kip->list)) {
65854 list_del(&kip->list);
65855 - module_free(NULL, kip->insns);
65856 + module_free_exec(NULL, kip->insns);
65857 kfree(kip);
65858 }
65859 return 1;
65860 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
65861 {
65862 int i, err = 0;
65863 unsigned long offset = 0, size = 0;
65864 - char *modname, namebuf[128];
65865 + char *modname, namebuf[KSYM_NAME_LEN];
65866 const char *symbol_name;
65867 void *addr;
65868 struct kprobe_blackpoint *kb;
65869 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
65870 const char *sym = NULL;
65871 unsigned int i = *(loff_t *) v;
65872 unsigned long offset = 0;
65873 - char *modname, namebuf[128];
65874 + char *modname, namebuf[KSYM_NAME_LEN];
65875
65876 head = &kprobe_table[i];
65877 preempt_disable();
65878 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
65879 index b2e08c9..01d8049 100644
65880 --- a/kernel/lockdep.c
65881 +++ b/kernel/lockdep.c
65882 @@ -592,6 +592,10 @@ static int static_obj(void *obj)
65883 end = (unsigned long) &_end,
65884 addr = (unsigned long) obj;
65885
65886 +#ifdef CONFIG_PAX_KERNEXEC
65887 + start = ktla_ktva(start);
65888 +#endif
65889 +
65890 /*
65891 * static variable?
65892 */
65893 @@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
65894 if (!static_obj(lock->key)) {
65895 debug_locks_off();
65896 printk("INFO: trying to register non-static key.\n");
65897 + printk("lock:%pS key:%pS.\n", lock, lock->key);
65898 printk("the code is fine but needs lockdep annotation.\n");
65899 printk("turning off the locking correctness validator.\n");
65900 dump_stack();
65901 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
65902 if (!class)
65903 return 0;
65904 }
65905 - atomic_inc((atomic_t *)&class->ops);
65906 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
65907 if (very_verbose(class)) {
65908 printk("\nacquire class [%p] %s", class->key, class->name);
65909 if (class->name_version > 1)
65910 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
65911 index 91c32a0..b2c71c5 100644
65912 --- a/kernel/lockdep_proc.c
65913 +++ b/kernel/lockdep_proc.c
65914 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
65915
65916 static void print_name(struct seq_file *m, struct lock_class *class)
65917 {
65918 - char str[128];
65919 + char str[KSYM_NAME_LEN];
65920 const char *name = class->name;
65921
65922 if (!name) {
65923 diff --git a/kernel/module.c b/kernel/module.c
65924 index 178333c..04e3408 100644
65925 --- a/kernel/module.c
65926 +++ b/kernel/module.c
65927 @@ -58,6 +58,7 @@
65928 #include <linux/jump_label.h>
65929 #include <linux/pfn.h>
65930 #include <linux/bsearch.h>
65931 +#include <linux/grsecurity.h>
65932
65933 #define CREATE_TRACE_POINTS
65934 #include <trace/events/module.h>
65935 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
65936
65937 /* Bounds of module allocation, for speeding __module_address.
65938 * Protected by module_mutex. */
65939 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
65940 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
65941 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
65942
65943 int register_module_notifier(struct notifier_block * nb)
65944 {
65945 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65946 return true;
65947
65948 list_for_each_entry_rcu(mod, &modules, list) {
65949 - struct symsearch arr[] = {
65950 + struct symsearch modarr[] = {
65951 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
65952 NOT_GPL_ONLY, false },
65953 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
65954 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
65955 #endif
65956 };
65957
65958 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
65959 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
65960 return true;
65961 }
65962 return false;
65963 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
65964 static int percpu_modalloc(struct module *mod,
65965 unsigned long size, unsigned long align)
65966 {
65967 - if (align > PAGE_SIZE) {
65968 + if (align-1 >= PAGE_SIZE) {
65969 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
65970 mod->name, align, PAGE_SIZE);
65971 align = PAGE_SIZE;
65972 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
65973 */
65974 #ifdef CONFIG_SYSFS
65975
65976 -#ifdef CONFIG_KALLSYMS
65977 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65978 static inline bool sect_empty(const Elf_Shdr *sect)
65979 {
65980 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
65981 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
65982
65983 static void unset_module_core_ro_nx(struct module *mod)
65984 {
65985 - set_page_attributes(mod->module_core + mod->core_text_size,
65986 - mod->module_core + mod->core_size,
65987 + set_page_attributes(mod->module_core_rw,
65988 + mod->module_core_rw + mod->core_size_rw,
65989 set_memory_x);
65990 - set_page_attributes(mod->module_core,
65991 - mod->module_core + mod->core_ro_size,
65992 + set_page_attributes(mod->module_core_rx,
65993 + mod->module_core_rx + mod->core_size_rx,
65994 set_memory_rw);
65995 }
65996
65997 static void unset_module_init_ro_nx(struct module *mod)
65998 {
65999 - set_page_attributes(mod->module_init + mod->init_text_size,
66000 - mod->module_init + mod->init_size,
66001 + set_page_attributes(mod->module_init_rw,
66002 + mod->module_init_rw + mod->init_size_rw,
66003 set_memory_x);
66004 - set_page_attributes(mod->module_init,
66005 - mod->module_init + mod->init_ro_size,
66006 + set_page_attributes(mod->module_init_rx,
66007 + mod->module_init_rx + mod->init_size_rx,
66008 set_memory_rw);
66009 }
66010
66011 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
66012
66013 mutex_lock(&module_mutex);
66014 list_for_each_entry_rcu(mod, &modules, list) {
66015 - if ((mod->module_core) && (mod->core_text_size)) {
66016 - set_page_attributes(mod->module_core,
66017 - mod->module_core + mod->core_text_size,
66018 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66019 + set_page_attributes(mod->module_core_rx,
66020 + mod->module_core_rx + mod->core_size_rx,
66021 set_memory_rw);
66022 }
66023 - if ((mod->module_init) && (mod->init_text_size)) {
66024 - set_page_attributes(mod->module_init,
66025 - mod->module_init + mod->init_text_size,
66026 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66027 + set_page_attributes(mod->module_init_rx,
66028 + mod->module_init_rx + mod->init_size_rx,
66029 set_memory_rw);
66030 }
66031 }
66032 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
66033
66034 mutex_lock(&module_mutex);
66035 list_for_each_entry_rcu(mod, &modules, list) {
66036 - if ((mod->module_core) && (mod->core_text_size)) {
66037 - set_page_attributes(mod->module_core,
66038 - mod->module_core + mod->core_text_size,
66039 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66040 + set_page_attributes(mod->module_core_rx,
66041 + mod->module_core_rx + mod->core_size_rx,
66042 set_memory_ro);
66043 }
66044 - if ((mod->module_init) && (mod->init_text_size)) {
66045 - set_page_attributes(mod->module_init,
66046 - mod->module_init + mod->init_text_size,
66047 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66048 + set_page_attributes(mod->module_init_rx,
66049 + mod->module_init_rx + mod->init_size_rx,
66050 set_memory_ro);
66051 }
66052 }
66053 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
66054
66055 /* This may be NULL, but that's OK */
66056 unset_module_init_ro_nx(mod);
66057 - module_free(mod, mod->module_init);
66058 + module_free(mod, mod->module_init_rw);
66059 + module_free_exec(mod, mod->module_init_rx);
66060 kfree(mod->args);
66061 percpu_modfree(mod);
66062
66063 /* Free lock-classes: */
66064 - lockdep_free_key_range(mod->module_core, mod->core_size);
66065 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66066 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66067
66068 /* Finally, free the core (containing the module structure) */
66069 unset_module_core_ro_nx(mod);
66070 - module_free(mod, mod->module_core);
66071 + module_free_exec(mod, mod->module_core_rx);
66072 + module_free(mod, mod->module_core_rw);
66073
66074 #ifdef CONFIG_MPU
66075 update_protections(current->mm);
66076 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66077 unsigned int i;
66078 int ret = 0;
66079 const struct kernel_symbol *ksym;
66080 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66081 + int is_fs_load = 0;
66082 + int register_filesystem_found = 0;
66083 + char *p;
66084 +
66085 + p = strstr(mod->args, "grsec_modharden_fs");
66086 + if (p) {
66087 + char *endptr = p + strlen("grsec_modharden_fs");
66088 + /* copy \0 as well */
66089 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66090 + is_fs_load = 1;
66091 + }
66092 +#endif
66093
66094 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66095 const char *name = info->strtab + sym[i].st_name;
66096
66097 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66098 + /* it's a real shame this will never get ripped and copied
66099 + upstream! ;(
66100 + */
66101 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66102 + register_filesystem_found = 1;
66103 +#endif
66104 +
66105 switch (sym[i].st_shndx) {
66106 case SHN_COMMON:
66107 /* We compiled with -fno-common. These are not
66108 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66109 ksym = resolve_symbol_wait(mod, info, name);
66110 /* Ok if resolved. */
66111 if (ksym && !IS_ERR(ksym)) {
66112 + pax_open_kernel();
66113 sym[i].st_value = ksym->value;
66114 + pax_close_kernel();
66115 break;
66116 }
66117
66118 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66119 secbase = (unsigned long)mod_percpu(mod);
66120 else
66121 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66122 + pax_open_kernel();
66123 sym[i].st_value += secbase;
66124 + pax_close_kernel();
66125 break;
66126 }
66127 }
66128
66129 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66130 + if (is_fs_load && !register_filesystem_found) {
66131 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66132 + ret = -EPERM;
66133 + }
66134 +#endif
66135 +
66136 return ret;
66137 }
66138
66139 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66140 || s->sh_entsize != ~0UL
66141 || strstarts(sname, ".init"))
66142 continue;
66143 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66144 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66145 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66146 + else
66147 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66148 DEBUGP("\t%s\n", name);
66149 }
66150 - switch (m) {
66151 - case 0: /* executable */
66152 - mod->core_size = debug_align(mod->core_size);
66153 - mod->core_text_size = mod->core_size;
66154 - break;
66155 - case 1: /* RO: text and ro-data */
66156 - mod->core_size = debug_align(mod->core_size);
66157 - mod->core_ro_size = mod->core_size;
66158 - break;
66159 - case 3: /* whole core */
66160 - mod->core_size = debug_align(mod->core_size);
66161 - break;
66162 - }
66163 }
66164
66165 DEBUGP("Init section allocation order:\n");
66166 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66167 || s->sh_entsize != ~0UL
66168 || !strstarts(sname, ".init"))
66169 continue;
66170 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66171 - | INIT_OFFSET_MASK);
66172 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66173 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66174 + else
66175 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66176 + s->sh_entsize |= INIT_OFFSET_MASK;
66177 DEBUGP("\t%s\n", sname);
66178 }
66179 - switch (m) {
66180 - case 0: /* executable */
66181 - mod->init_size = debug_align(mod->init_size);
66182 - mod->init_text_size = mod->init_size;
66183 - break;
66184 - case 1: /* RO: text and ro-data */
66185 - mod->init_size = debug_align(mod->init_size);
66186 - mod->init_ro_size = mod->init_size;
66187 - break;
66188 - case 3: /* whole init */
66189 - mod->init_size = debug_align(mod->init_size);
66190 - break;
66191 - }
66192 }
66193 }
66194
66195 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66196
66197 /* Put symbol section at end of init part of module. */
66198 symsect->sh_flags |= SHF_ALLOC;
66199 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66200 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66201 info->index.sym) | INIT_OFFSET_MASK;
66202 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
66203
66204 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66205 }
66206
66207 /* Append room for core symbols at end of core part. */
66208 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66209 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66210 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66211 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66212
66213 /* Put string table section at end of init part of module. */
66214 strsect->sh_flags |= SHF_ALLOC;
66215 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66216 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66217 info->index.str) | INIT_OFFSET_MASK;
66218 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
66219
66220 /* Append room for core symbols' strings at end of core part. */
66221 - info->stroffs = mod->core_size;
66222 + info->stroffs = mod->core_size_rx;
66223 __set_bit(0, info->strmap);
66224 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
66225 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
66226 }
66227
66228 static void add_kallsyms(struct module *mod, const struct load_info *info)
66229 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66230 /* Make sure we get permanent strtab: don't use info->strtab. */
66231 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66232
66233 + pax_open_kernel();
66234 +
66235 /* Set types up while we still have access to sections. */
66236 for (i = 0; i < mod->num_symtab; i++)
66237 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66238
66239 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66240 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66241 src = mod->symtab;
66242 *dst = *src;
66243 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
66244 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66245 }
66246 mod->core_num_syms = ndst;
66247
66248 - mod->core_strtab = s = mod->module_core + info->stroffs;
66249 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66250 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
66251 if (test_bit(i, info->strmap))
66252 *++s = mod->strtab[i];
66253 +
66254 + pax_close_kernel();
66255 }
66256 #else
66257 static inline void layout_symtab(struct module *mod, struct load_info *info)
66258 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
66259 return size == 0 ? NULL : vmalloc_exec(size);
66260 }
66261
66262 -static void *module_alloc_update_bounds(unsigned long size)
66263 +static void *module_alloc_update_bounds_rw(unsigned long size)
66264 {
66265 void *ret = module_alloc(size);
66266
66267 if (ret) {
66268 mutex_lock(&module_mutex);
66269 /* Update module bounds. */
66270 - if ((unsigned long)ret < module_addr_min)
66271 - module_addr_min = (unsigned long)ret;
66272 - if ((unsigned long)ret + size > module_addr_max)
66273 - module_addr_max = (unsigned long)ret + size;
66274 + if ((unsigned long)ret < module_addr_min_rw)
66275 + module_addr_min_rw = (unsigned long)ret;
66276 + if ((unsigned long)ret + size > module_addr_max_rw)
66277 + module_addr_max_rw = (unsigned long)ret + size;
66278 + mutex_unlock(&module_mutex);
66279 + }
66280 + return ret;
66281 +}
66282 +
66283 +static void *module_alloc_update_bounds_rx(unsigned long size)
66284 +{
66285 + void *ret = module_alloc_exec(size);
66286 +
66287 + if (ret) {
66288 + mutex_lock(&module_mutex);
66289 + /* Update module bounds. */
66290 + if ((unsigned long)ret < module_addr_min_rx)
66291 + module_addr_min_rx = (unsigned long)ret;
66292 + if ((unsigned long)ret + size > module_addr_max_rx)
66293 + module_addr_max_rx = (unsigned long)ret + size;
66294 mutex_unlock(&module_mutex);
66295 }
66296 return ret;
66297 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
66298 static int check_modinfo(struct module *mod, struct load_info *info)
66299 {
66300 const char *modmagic = get_modinfo(info, "vermagic");
66301 + const char *license = get_modinfo(info, "license");
66302 int err;
66303
66304 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66305 + if (!license || !license_is_gpl_compatible(license))
66306 + return -ENOEXEC;
66307 +#endif
66308 +
66309 /* This is allowed: modprobe --force will invalidate it. */
66310 if (!modmagic) {
66311 err = try_to_force_load(mod, "bad vermagic");
66312 @@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66313 }
66314
66315 /* Set up license info based on the info section */
66316 - set_license(mod, get_modinfo(info, "license"));
66317 + set_license(mod, license);
66318
66319 return 0;
66320 }
66321 @@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
66322 void *ptr;
66323
66324 /* Do the allocs. */
66325 - ptr = module_alloc_update_bounds(mod->core_size);
66326 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66327 /*
66328 * The pointer to this block is stored in the module structure
66329 * which is inside the block. Just mark it as not being a
66330 @@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
66331 if (!ptr)
66332 return -ENOMEM;
66333
66334 - memset(ptr, 0, mod->core_size);
66335 - mod->module_core = ptr;
66336 + memset(ptr, 0, mod->core_size_rw);
66337 + mod->module_core_rw = ptr;
66338
66339 - ptr = module_alloc_update_bounds(mod->init_size);
66340 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66341 /*
66342 * The pointer to this block is stored in the module structure
66343 * which is inside the block. This block doesn't need to be
66344 * scanned as it contains data and code that will be freed
66345 * after the module is initialized.
66346 */
66347 - kmemleak_ignore(ptr);
66348 - if (!ptr && mod->init_size) {
66349 - module_free(mod, mod->module_core);
66350 + kmemleak_not_leak(ptr);
66351 + if (!ptr && mod->init_size_rw) {
66352 + module_free(mod, mod->module_core_rw);
66353 return -ENOMEM;
66354 }
66355 - memset(ptr, 0, mod->init_size);
66356 - mod->module_init = ptr;
66357 + memset(ptr, 0, mod->init_size_rw);
66358 + mod->module_init_rw = ptr;
66359 +
66360 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66361 + kmemleak_not_leak(ptr);
66362 + if (!ptr) {
66363 + module_free(mod, mod->module_init_rw);
66364 + module_free(mod, mod->module_core_rw);
66365 + return -ENOMEM;
66366 + }
66367 +
66368 + pax_open_kernel();
66369 + memset(ptr, 0, mod->core_size_rx);
66370 + pax_close_kernel();
66371 + mod->module_core_rx = ptr;
66372 +
66373 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66374 + kmemleak_not_leak(ptr);
66375 + if (!ptr && mod->init_size_rx) {
66376 + module_free_exec(mod, mod->module_core_rx);
66377 + module_free(mod, mod->module_init_rw);
66378 + module_free(mod, mod->module_core_rw);
66379 + return -ENOMEM;
66380 + }
66381 +
66382 + pax_open_kernel();
66383 + memset(ptr, 0, mod->init_size_rx);
66384 + pax_close_kernel();
66385 + mod->module_init_rx = ptr;
66386
66387 /* Transfer each section which specifies SHF_ALLOC */
66388 DEBUGP("final section addresses:\n");
66389 @@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
66390 if (!(shdr->sh_flags & SHF_ALLOC))
66391 continue;
66392
66393 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66394 - dest = mod->module_init
66395 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66396 - else
66397 - dest = mod->module_core + shdr->sh_entsize;
66398 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66399 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66400 + dest = mod->module_init_rw
66401 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66402 + else
66403 + dest = mod->module_init_rx
66404 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66405 + } else {
66406 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66407 + dest = mod->module_core_rw + shdr->sh_entsize;
66408 + else
66409 + dest = mod->module_core_rx + shdr->sh_entsize;
66410 + }
66411 +
66412 + if (shdr->sh_type != SHT_NOBITS) {
66413 +
66414 +#ifdef CONFIG_PAX_KERNEXEC
66415 +#ifdef CONFIG_X86_64
66416 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66417 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66418 +#endif
66419 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66420 + pax_open_kernel();
66421 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66422 + pax_close_kernel();
66423 + } else
66424 +#endif
66425
66426 - if (shdr->sh_type != SHT_NOBITS)
66427 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66428 + }
66429 /* Update sh_addr to point to copy in image. */
66430 - shdr->sh_addr = (unsigned long)dest;
66431 +
66432 +#ifdef CONFIG_PAX_KERNEXEC
66433 + if (shdr->sh_flags & SHF_EXECINSTR)
66434 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66435 + else
66436 +#endif
66437 +
66438 + shdr->sh_addr = (unsigned long)dest;
66439 DEBUGP("\t0x%lx %s\n",
66440 shdr->sh_addr, info->secstrings + shdr->sh_name);
66441 }
66442 @@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
66443 * Do it before processing of module parameters, so the module
66444 * can provide parameter accessor functions of its own.
66445 */
66446 - if (mod->module_init)
66447 - flush_icache_range((unsigned long)mod->module_init,
66448 - (unsigned long)mod->module_init
66449 - + mod->init_size);
66450 - flush_icache_range((unsigned long)mod->module_core,
66451 - (unsigned long)mod->module_core + mod->core_size);
66452 + if (mod->module_init_rx)
66453 + flush_icache_range((unsigned long)mod->module_init_rx,
66454 + (unsigned long)mod->module_init_rx
66455 + + mod->init_size_rx);
66456 + flush_icache_range((unsigned long)mod->module_core_rx,
66457 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66458
66459 set_fs(old_fs);
66460 }
66461 @@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
66462 {
66463 kfree(info->strmap);
66464 percpu_modfree(mod);
66465 - module_free(mod, mod->module_init);
66466 - module_free(mod, mod->module_core);
66467 + module_free_exec(mod, mod->module_init_rx);
66468 + module_free_exec(mod, mod->module_core_rx);
66469 + module_free(mod, mod->module_init_rw);
66470 + module_free(mod, mod->module_core_rw);
66471 }
66472
66473 int __weak module_finalize(const Elf_Ehdr *hdr,
66474 @@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
66475 if (err)
66476 goto free_unload;
66477
66478 + /* Now copy in args */
66479 + mod->args = strndup_user(uargs, ~0UL >> 1);
66480 + if (IS_ERR(mod->args)) {
66481 + err = PTR_ERR(mod->args);
66482 + goto free_unload;
66483 + }
66484 +
66485 /* Set up MODINFO_ATTR fields */
66486 setup_modinfo(mod, &info);
66487
66488 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66489 + {
66490 + char *p, *p2;
66491 +
66492 + if (strstr(mod->args, "grsec_modharden_netdev")) {
66493 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66494 + err = -EPERM;
66495 + goto free_modinfo;
66496 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66497 + p += strlen("grsec_modharden_normal");
66498 + p2 = strstr(p, "_");
66499 + if (p2) {
66500 + *p2 = '\0';
66501 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66502 + *p2 = '_';
66503 + }
66504 + err = -EPERM;
66505 + goto free_modinfo;
66506 + }
66507 + }
66508 +#endif
66509 +
66510 /* Fix up syms, so that st_value is a pointer to location. */
66511 err = simplify_symbols(mod, &info);
66512 if (err < 0)
66513 @@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
66514
66515 flush_module_icache(mod);
66516
66517 - /* Now copy in args */
66518 - mod->args = strndup_user(uargs, ~0UL >> 1);
66519 - if (IS_ERR(mod->args)) {
66520 - err = PTR_ERR(mod->args);
66521 - goto free_arch_cleanup;
66522 - }
66523 -
66524 /* Mark state as coming so strong_try_module_get() ignores us. */
66525 mod->state = MODULE_STATE_COMING;
66526
66527 @@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
66528 unlock:
66529 mutex_unlock(&module_mutex);
66530 synchronize_sched();
66531 - kfree(mod->args);
66532 - free_arch_cleanup:
66533 module_arch_cleanup(mod);
66534 free_modinfo:
66535 free_modinfo(mod);
66536 + kfree(mod->args);
66537 free_unload:
66538 module_unload_free(mod);
66539 free_module:
66540 @@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66541 MODULE_STATE_COMING, mod);
66542
66543 /* Set RO and NX regions for core */
66544 - set_section_ro_nx(mod->module_core,
66545 - mod->core_text_size,
66546 - mod->core_ro_size,
66547 - mod->core_size);
66548 + set_section_ro_nx(mod->module_core_rx,
66549 + mod->core_size_rx,
66550 + mod->core_size_rx,
66551 + mod->core_size_rx);
66552
66553 /* Set RO and NX regions for init */
66554 - set_section_ro_nx(mod->module_init,
66555 - mod->init_text_size,
66556 - mod->init_ro_size,
66557 - mod->init_size);
66558 + set_section_ro_nx(mod->module_init_rx,
66559 + mod->init_size_rx,
66560 + mod->init_size_rx,
66561 + mod->init_size_rx);
66562
66563 do_mod_ctors(mod);
66564 /* Start the module */
66565 @@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66566 mod->strtab = mod->core_strtab;
66567 #endif
66568 unset_module_init_ro_nx(mod);
66569 - module_free(mod, mod->module_init);
66570 - mod->module_init = NULL;
66571 - mod->init_size = 0;
66572 - mod->init_ro_size = 0;
66573 - mod->init_text_size = 0;
66574 + module_free(mod, mod->module_init_rw);
66575 + module_free_exec(mod, mod->module_init_rx);
66576 + mod->module_init_rw = NULL;
66577 + mod->module_init_rx = NULL;
66578 + mod->init_size_rw = 0;
66579 + mod->init_size_rx = 0;
66580 mutex_unlock(&module_mutex);
66581
66582 return 0;
66583 @@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
66584 unsigned long nextval;
66585
66586 /* At worse, next value is at end of module */
66587 - if (within_module_init(addr, mod))
66588 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
66589 + if (within_module_init_rx(addr, mod))
66590 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66591 + else if (within_module_init_rw(addr, mod))
66592 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66593 + else if (within_module_core_rx(addr, mod))
66594 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66595 + else if (within_module_core_rw(addr, mod))
66596 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66597 else
66598 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
66599 + return NULL;
66600
66601 /* Scan for closest preceding symbol, and next symbol. (ELF
66602 starts real symbols at 1). */
66603 @@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
66604 char buf[8];
66605
66606 seq_printf(m, "%s %u",
66607 - mod->name, mod->init_size + mod->core_size);
66608 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66609 print_unload_info(m, mod);
66610
66611 /* Informative for users. */
66612 @@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
66613 mod->state == MODULE_STATE_COMING ? "Loading":
66614 "Live");
66615 /* Used by oprofile and other similar tools. */
66616 - seq_printf(m, " 0x%pK", mod->module_core);
66617 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66618
66619 /* Taints info */
66620 if (mod->taints)
66621 @@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
66622
66623 static int __init proc_modules_init(void)
66624 {
66625 +#ifndef CONFIG_GRKERNSEC_HIDESYM
66626 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66627 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66628 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66629 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
66630 +#else
66631 proc_create("modules", 0, NULL, &proc_modules_operations);
66632 +#endif
66633 +#else
66634 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
66635 +#endif
66636 return 0;
66637 }
66638 module_init(proc_modules_init);
66639 @@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
66640 {
66641 struct module *mod;
66642
66643 - if (addr < module_addr_min || addr > module_addr_max)
66644 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
66645 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
66646 return NULL;
66647
66648 list_for_each_entry_rcu(mod, &modules, list)
66649 - if (within_module_core(addr, mod)
66650 - || within_module_init(addr, mod))
66651 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
66652 return mod;
66653 return NULL;
66654 }
66655 @@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
66656 */
66657 struct module *__module_text_address(unsigned long addr)
66658 {
66659 - struct module *mod = __module_address(addr);
66660 + struct module *mod;
66661 +
66662 +#ifdef CONFIG_X86_32
66663 + addr = ktla_ktva(addr);
66664 +#endif
66665 +
66666 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
66667 + return NULL;
66668 +
66669 + mod = __module_address(addr);
66670 +
66671 if (mod) {
66672 /* Make sure it's within the text section. */
66673 - if (!within(addr, mod->module_init, mod->init_text_size)
66674 - && !within(addr, mod->module_core, mod->core_text_size))
66675 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
66676 mod = NULL;
66677 }
66678 return mod;
66679 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
66680 index 7e3443f..b2a1e6b 100644
66681 --- a/kernel/mutex-debug.c
66682 +++ b/kernel/mutex-debug.c
66683 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
66684 }
66685
66686 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66687 - struct thread_info *ti)
66688 + struct task_struct *task)
66689 {
66690 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
66691
66692 /* Mark the current thread as blocked on the lock: */
66693 - ti->task->blocked_on = waiter;
66694 + task->blocked_on = waiter;
66695 }
66696
66697 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66698 - struct thread_info *ti)
66699 + struct task_struct *task)
66700 {
66701 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
66702 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
66703 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
66704 - ti->task->blocked_on = NULL;
66705 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
66706 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
66707 + task->blocked_on = NULL;
66708
66709 list_del_init(&waiter->list);
66710 waiter->task = NULL;
66711 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
66712 index 0799fd3..d06ae3b 100644
66713 --- a/kernel/mutex-debug.h
66714 +++ b/kernel/mutex-debug.h
66715 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
66716 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
66717 extern void debug_mutex_add_waiter(struct mutex *lock,
66718 struct mutex_waiter *waiter,
66719 - struct thread_info *ti);
66720 + struct task_struct *task);
66721 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
66722 - struct thread_info *ti);
66723 + struct task_struct *task);
66724 extern void debug_mutex_unlock(struct mutex *lock);
66725 extern void debug_mutex_init(struct mutex *lock, const char *name,
66726 struct lock_class_key *key);
66727 diff --git a/kernel/mutex.c b/kernel/mutex.c
66728 index 89096dd..f91ebc5 100644
66729 --- a/kernel/mutex.c
66730 +++ b/kernel/mutex.c
66731 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66732 spin_lock_mutex(&lock->wait_lock, flags);
66733
66734 debug_mutex_lock_common(lock, &waiter);
66735 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
66736 + debug_mutex_add_waiter(lock, &waiter, task);
66737
66738 /* add waiting tasks to the end of the waitqueue (FIFO): */
66739 list_add_tail(&waiter.list, &lock->wait_list);
66740 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66741 * TASK_UNINTERRUPTIBLE case.)
66742 */
66743 if (unlikely(signal_pending_state(state, task))) {
66744 - mutex_remove_waiter(lock, &waiter,
66745 - task_thread_info(task));
66746 + mutex_remove_waiter(lock, &waiter, task);
66747 mutex_release(&lock->dep_map, 1, ip);
66748 spin_unlock_mutex(&lock->wait_lock, flags);
66749
66750 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
66751 done:
66752 lock_acquired(&lock->dep_map, ip);
66753 /* got the lock - rejoice! */
66754 - mutex_remove_waiter(lock, &waiter, current_thread_info());
66755 + mutex_remove_waiter(lock, &waiter, task);
66756 mutex_set_owner(lock);
66757
66758 /* set it to 0 if there are no waiters left: */
66759 diff --git a/kernel/padata.c b/kernel/padata.c
66760 index b452599..5d68f4e 100644
66761 --- a/kernel/padata.c
66762 +++ b/kernel/padata.c
66763 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
66764 padata->pd = pd;
66765 padata->cb_cpu = cb_cpu;
66766
66767 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
66768 - atomic_set(&pd->seq_nr, -1);
66769 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
66770 + atomic_set_unchecked(&pd->seq_nr, -1);
66771
66772 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
66773 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
66774
66775 target_cpu = padata_cpu_hash(padata);
66776 queue = per_cpu_ptr(pd->pqueue, target_cpu);
66777 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
66778 padata_init_pqueues(pd);
66779 padata_init_squeues(pd);
66780 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
66781 - atomic_set(&pd->seq_nr, -1);
66782 + atomic_set_unchecked(&pd->seq_nr, -1);
66783 atomic_set(&pd->reorder_objects, 0);
66784 atomic_set(&pd->refcnt, 0);
66785 pd->pinst = pinst;
66786 diff --git a/kernel/panic.c b/kernel/panic.c
66787 index 3458469..342c500 100644
66788 --- a/kernel/panic.c
66789 +++ b/kernel/panic.c
66790 @@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
66791 va_end(args);
66792 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
66793 #ifdef CONFIG_DEBUG_BUGVERBOSE
66794 - dump_stack();
66795 + /*
66796 + * Avoid nested stack-dumping if a panic occurs during oops processing
66797 + */
66798 + if (!oops_in_progress)
66799 + dump_stack();
66800 #endif
66801
66802 /*
66803 @@ -382,7 +386,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
66804 const char *board;
66805
66806 printk(KERN_WARNING "------------[ cut here ]------------\n");
66807 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
66808 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
66809 board = dmi_get_system_info(DMI_PRODUCT_NAME);
66810 if (board)
66811 printk(KERN_WARNING "Hardware name: %s\n", board);
66812 @@ -437,7 +441,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
66813 */
66814 void __stack_chk_fail(void)
66815 {
66816 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
66817 + dump_stack();
66818 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
66819 __builtin_return_address(0));
66820 }
66821 EXPORT_SYMBOL(__stack_chk_fail);
66822 diff --git a/kernel/pid.c b/kernel/pid.c
66823 index fa5f722..0c93e57 100644
66824 --- a/kernel/pid.c
66825 +++ b/kernel/pid.c
66826 @@ -33,6 +33,7 @@
66827 #include <linux/rculist.h>
66828 #include <linux/bootmem.h>
66829 #include <linux/hash.h>
66830 +#include <linux/security.h>
66831 #include <linux/pid_namespace.h>
66832 #include <linux/init_task.h>
66833 #include <linux/syscalls.h>
66834 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
66835
66836 int pid_max = PID_MAX_DEFAULT;
66837
66838 -#define RESERVED_PIDS 300
66839 +#define RESERVED_PIDS 500
66840
66841 int pid_max_min = RESERVED_PIDS + 1;
66842 int pid_max_max = PID_MAX_LIMIT;
66843 @@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
66844 */
66845 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
66846 {
66847 + struct task_struct *task;
66848 +
66849 rcu_lockdep_assert(rcu_read_lock_held(),
66850 "find_task_by_pid_ns() needs rcu_read_lock()"
66851 " protection");
66852 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66853 +
66854 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
66855 +
66856 + if (gr_pid_is_chrooted(task))
66857 + return NULL;
66858 +
66859 + return task;
66860 }
66861
66862 struct task_struct *find_task_by_vpid(pid_t vnr)
66863 @@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
66864 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
66865 }
66866
66867 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
66868 +{
66869 + rcu_lockdep_assert(rcu_read_lock_held(),
66870 + "find_task_by_pid_ns() needs rcu_read_lock()"
66871 + " protection");
66872 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
66873 +}
66874 +
66875 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
66876 {
66877 struct pid *pid;
66878 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
66879 index e7cb76d..75eceb3 100644
66880 --- a/kernel/posix-cpu-timers.c
66881 +++ b/kernel/posix-cpu-timers.c
66882 @@ -6,6 +6,7 @@
66883 #include <linux/posix-timers.h>
66884 #include <linux/errno.h>
66885 #include <linux/math64.h>
66886 +#include <linux/security.h>
66887 #include <asm/uaccess.h>
66888 #include <linux/kernel_stat.h>
66889 #include <trace/events/timer.h>
66890 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
66891
66892 static __init int init_posix_cpu_timers(void)
66893 {
66894 - struct k_clock process = {
66895 + static struct k_clock process = {
66896 .clock_getres = process_cpu_clock_getres,
66897 .clock_get = process_cpu_clock_get,
66898 .timer_create = process_cpu_timer_create,
66899 .nsleep = process_cpu_nsleep,
66900 .nsleep_restart = process_cpu_nsleep_restart,
66901 };
66902 - struct k_clock thread = {
66903 + static struct k_clock thread = {
66904 .clock_getres = thread_cpu_clock_getres,
66905 .clock_get = thread_cpu_clock_get,
66906 .timer_create = thread_cpu_timer_create,
66907 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
66908 index 69185ae..cc2847a 100644
66909 --- a/kernel/posix-timers.c
66910 +++ b/kernel/posix-timers.c
66911 @@ -43,6 +43,7 @@
66912 #include <linux/idr.h>
66913 #include <linux/posix-clock.h>
66914 #include <linux/posix-timers.h>
66915 +#include <linux/grsecurity.h>
66916 #include <linux/syscalls.h>
66917 #include <linux/wait.h>
66918 #include <linux/workqueue.h>
66919 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
66920 * which we beg off on and pass to do_sys_settimeofday().
66921 */
66922
66923 -static struct k_clock posix_clocks[MAX_CLOCKS];
66924 +static struct k_clock *posix_clocks[MAX_CLOCKS];
66925
66926 /*
66927 * These ones are defined below.
66928 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66929 */
66930 static __init int init_posix_timers(void)
66931 {
66932 - struct k_clock clock_realtime = {
66933 + static struct k_clock clock_realtime = {
66934 .clock_getres = hrtimer_get_res,
66935 .clock_get = posix_clock_realtime_get,
66936 .clock_set = posix_clock_realtime_set,
66937 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66938 .timer_get = common_timer_get,
66939 .timer_del = common_timer_del,
66940 };
66941 - struct k_clock clock_monotonic = {
66942 + static struct k_clock clock_monotonic = {
66943 .clock_getres = hrtimer_get_res,
66944 .clock_get = posix_ktime_get_ts,
66945 .nsleep = common_nsleep,
66946 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66947 .timer_get = common_timer_get,
66948 .timer_del = common_timer_del,
66949 };
66950 - struct k_clock clock_monotonic_raw = {
66951 + static struct k_clock clock_monotonic_raw = {
66952 .clock_getres = hrtimer_get_res,
66953 .clock_get = posix_get_monotonic_raw,
66954 };
66955 - struct k_clock clock_realtime_coarse = {
66956 + static struct k_clock clock_realtime_coarse = {
66957 .clock_getres = posix_get_coarse_res,
66958 .clock_get = posix_get_realtime_coarse,
66959 };
66960 - struct k_clock clock_monotonic_coarse = {
66961 + static struct k_clock clock_monotonic_coarse = {
66962 .clock_getres = posix_get_coarse_res,
66963 .clock_get = posix_get_monotonic_coarse,
66964 };
66965 - struct k_clock clock_boottime = {
66966 + static struct k_clock clock_boottime = {
66967 .clock_getres = hrtimer_get_res,
66968 .clock_get = posix_get_boottime,
66969 .nsleep = common_nsleep,
66970 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
66971 return;
66972 }
66973
66974 - posix_clocks[clock_id] = *new_clock;
66975 + posix_clocks[clock_id] = new_clock;
66976 }
66977 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66978
66979 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66980 return (id & CLOCKFD_MASK) == CLOCKFD ?
66981 &clock_posix_dynamic : &clock_posix_cpu;
66982
66983 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
66984 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
66985 return NULL;
66986 - return &posix_clocks[id];
66987 + return posix_clocks[id];
66988 }
66989
66990 static int common_timer_create(struct k_itimer *new_timer)
66991 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
66992 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
66993 return -EFAULT;
66994
66995 + /* only the CLOCK_REALTIME clock can be set, all other clocks
66996 + have their clock_set fptr set to a nosettime dummy function
66997 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
66998 + call common_clock_set, which calls do_sys_settimeofday, which
66999 + we hook
67000 + */
67001 +
67002 return kc->clock_set(which_clock, &new_tp);
67003 }
67004
67005 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67006 index d523593..68197a4 100644
67007 --- a/kernel/power/poweroff.c
67008 +++ b/kernel/power/poweroff.c
67009 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67010 .enable_mask = SYSRQ_ENABLE_BOOT,
67011 };
67012
67013 -static int pm_sysrq_init(void)
67014 +static int __init pm_sysrq_init(void)
67015 {
67016 register_sysrq_key('o', &sysrq_poweroff_op);
67017 return 0;
67018 diff --git a/kernel/power/process.c b/kernel/power/process.c
67019 index 3d4b954..11af930 100644
67020 --- a/kernel/power/process.c
67021 +++ b/kernel/power/process.c
67022 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
67023 u64 elapsed_csecs64;
67024 unsigned int elapsed_csecs;
67025 bool wakeup = false;
67026 + bool timedout = false;
67027
67028 do_gettimeofday(&start);
67029
67030 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
67031
67032 while (true) {
67033 todo = 0;
67034 + if (time_after(jiffies, end_time))
67035 + timedout = true;
67036 read_lock(&tasklist_lock);
67037 do_each_thread(g, p) {
67038 if (frozen(p) || !freezable(p))
67039 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
67040 * try_to_stop() after schedule() in ptrace/signal
67041 * stop sees TIF_FREEZE.
67042 */
67043 - if (!task_is_stopped_or_traced(p) &&
67044 - !freezer_should_skip(p))
67045 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67046 todo++;
67047 + if (timedout) {
67048 + printk(KERN_ERR "Task refusing to freeze:\n");
67049 + sched_show_task(p);
67050 + }
67051 + }
67052 } while_each_thread(g, p);
67053 read_unlock(&tasklist_lock);
67054
67055 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
67056 todo += wq_busy;
67057 }
67058
67059 - if (!todo || time_after(jiffies, end_time))
67060 + if (!todo || timedout)
67061 break;
67062
67063 if (pm_wakeup_pending()) {
67064 diff --git a/kernel/printk.c b/kernel/printk.c
67065 index 7982a0a..2095fdc 100644
67066 --- a/kernel/printk.c
67067 +++ b/kernel/printk.c
67068 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
67069 if (from_file && type != SYSLOG_ACTION_OPEN)
67070 return 0;
67071
67072 +#ifdef CONFIG_GRKERNSEC_DMESG
67073 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67074 + return -EPERM;
67075 +#endif
67076 +
67077 if (syslog_action_restricted(type)) {
67078 if (capable(CAP_SYSLOG))
67079 return 0;
67080 diff --git a/kernel/profile.c b/kernel/profile.c
67081 index 76b8e77..a2930e8 100644
67082 --- a/kernel/profile.c
67083 +++ b/kernel/profile.c
67084 @@ -39,7 +39,7 @@ struct profile_hit {
67085 /* Oprofile timer tick hook */
67086 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67087
67088 -static atomic_t *prof_buffer;
67089 +static atomic_unchecked_t *prof_buffer;
67090 static unsigned long prof_len, prof_shift;
67091
67092 int prof_on __read_mostly;
67093 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67094 hits[i].pc = 0;
67095 continue;
67096 }
67097 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67098 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67099 hits[i].hits = hits[i].pc = 0;
67100 }
67101 }
67102 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67103 * Add the current hit(s) and flush the write-queue out
67104 * to the global buffer:
67105 */
67106 - atomic_add(nr_hits, &prof_buffer[pc]);
67107 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67108 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67109 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67110 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67111 hits[i].pc = hits[i].hits = 0;
67112 }
67113 out:
67114 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67115 {
67116 unsigned long pc;
67117 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67118 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67119 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67120 }
67121 #endif /* !CONFIG_SMP */
67122
67123 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67124 return -EFAULT;
67125 buf++; p++; count--; read++;
67126 }
67127 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67128 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67129 if (copy_to_user(buf, (void *)pnt, count))
67130 return -EFAULT;
67131 read += count;
67132 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67133 }
67134 #endif
67135 profile_discard_flip_buffers();
67136 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67137 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67138 return count;
67139 }
67140
67141 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67142 index 78ab24a..332c915 100644
67143 --- a/kernel/ptrace.c
67144 +++ b/kernel/ptrace.c
67145 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
67146 return ret;
67147 }
67148
67149 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67150 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
67151 + unsigned int log)
67152 {
67153 const struct cred *cred = current_cred(), *tcred;
67154
67155 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67156 cred->gid == tcred->sgid &&
67157 cred->gid == tcred->gid))
67158 goto ok;
67159 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
67160 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
67161 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
67162 goto ok;
67163 rcu_read_unlock();
67164 return -EPERM;
67165 @@ -207,7 +209,9 @@ ok:
67166 smp_rmb();
67167 if (task->mm)
67168 dumpable = get_dumpable(task->mm);
67169 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
67170 + if (!dumpable &&
67171 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
67172 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
67173 return -EPERM;
67174
67175 return security_ptrace_access_check(task, mode);
67176 @@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
67177 {
67178 int err;
67179 task_lock(task);
67180 - err = __ptrace_may_access(task, mode);
67181 + err = __ptrace_may_access(task, mode, 0);
67182 + task_unlock(task);
67183 + return !err;
67184 +}
67185 +
67186 +bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
67187 +{
67188 + return __ptrace_may_access(task, mode, 0);
67189 +}
67190 +
67191 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
67192 +{
67193 + int err;
67194 + task_lock(task);
67195 + err = __ptrace_may_access(task, mode, 1);
67196 task_unlock(task);
67197 return !err;
67198 }
67199 @@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67200 goto out;
67201
67202 task_lock(task);
67203 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
67204 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
67205 task_unlock(task);
67206 if (retval)
67207 goto unlock_creds;
67208 @@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67209 task->ptrace = PT_PTRACED;
67210 if (seize)
67211 task->ptrace |= PT_SEIZED;
67212 - if (task_ns_capable(task, CAP_SYS_PTRACE))
67213 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
67214 task->ptrace |= PT_PTRACE_CAP;
67215
67216 __ptrace_link(task, current);
67217 @@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67218 break;
67219 return -EIO;
67220 }
67221 - if (copy_to_user(dst, buf, retval))
67222 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67223 return -EFAULT;
67224 copied += retval;
67225 src += retval;
67226 @@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
67227 bool seized = child->ptrace & PT_SEIZED;
67228 int ret = -EIO;
67229 siginfo_t siginfo, *si;
67230 - void __user *datavp = (void __user *) data;
67231 + void __user *datavp = (__force void __user *) data;
67232 unsigned long __user *datalp = datavp;
67233 unsigned long flags;
67234
67235 @@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67236 goto out;
67237 }
67238
67239 + if (gr_handle_ptrace(child, request)) {
67240 + ret = -EPERM;
67241 + goto out_put_task_struct;
67242 + }
67243 +
67244 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67245 ret = ptrace_attach(child, request, data);
67246 /*
67247 * Some architectures need to do book-keeping after
67248 * a ptrace attach.
67249 */
67250 - if (!ret)
67251 + if (!ret) {
67252 arch_ptrace_attach(child);
67253 + gr_audit_ptrace(child);
67254 + }
67255 goto out_put_task_struct;
67256 }
67257
67258 @@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67259 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67260 if (copied != sizeof(tmp))
67261 return -EIO;
67262 - return put_user(tmp, (unsigned long __user *)data);
67263 + return put_user(tmp, (__force unsigned long __user *)data);
67264 }
67265
67266 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67267 @@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67268 goto out;
67269 }
67270
67271 + if (gr_handle_ptrace(child, request)) {
67272 + ret = -EPERM;
67273 + goto out_put_task_struct;
67274 + }
67275 +
67276 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67277 ret = ptrace_attach(child, request, data);
67278 /*
67279 * Some architectures need to do book-keeping after
67280 * a ptrace attach.
67281 */
67282 - if (!ret)
67283 + if (!ret) {
67284 arch_ptrace_attach(child);
67285 + gr_audit_ptrace(child);
67286 + }
67287 goto out_put_task_struct;
67288 }
67289
67290 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
67291 index 636af6d..8af70ab 100644
67292 --- a/kernel/rcutiny.c
67293 +++ b/kernel/rcutiny.c
67294 @@ -46,7 +46,7 @@
67295 struct rcu_ctrlblk;
67296 static void invoke_rcu_callbacks(void);
67297 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
67298 -static void rcu_process_callbacks(struct softirq_action *unused);
67299 +static void rcu_process_callbacks(void);
67300 static void __call_rcu(struct rcu_head *head,
67301 void (*func)(struct rcu_head *rcu),
67302 struct rcu_ctrlblk *rcp);
67303 @@ -186,7 +186,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
67304 RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
67305 }
67306
67307 -static void rcu_process_callbacks(struct softirq_action *unused)
67308 +static void rcu_process_callbacks(void)
67309 {
67310 __rcu_process_callbacks(&rcu_sched_ctrlblk);
67311 __rcu_process_callbacks(&rcu_bh_ctrlblk);
67312 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67313 index 764825c..3aa6ac4 100644
67314 --- a/kernel/rcutorture.c
67315 +++ b/kernel/rcutorture.c
67316 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67317 { 0 };
67318 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67319 { 0 };
67320 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67321 -static atomic_t n_rcu_torture_alloc;
67322 -static atomic_t n_rcu_torture_alloc_fail;
67323 -static atomic_t n_rcu_torture_free;
67324 -static atomic_t n_rcu_torture_mberror;
67325 -static atomic_t n_rcu_torture_error;
67326 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67327 +static atomic_unchecked_t n_rcu_torture_alloc;
67328 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67329 +static atomic_unchecked_t n_rcu_torture_free;
67330 +static atomic_unchecked_t n_rcu_torture_mberror;
67331 +static atomic_unchecked_t n_rcu_torture_error;
67332 static long n_rcu_torture_boost_ktrerror;
67333 static long n_rcu_torture_boost_rterror;
67334 static long n_rcu_torture_boost_failure;
67335 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
67336
67337 spin_lock_bh(&rcu_torture_lock);
67338 if (list_empty(&rcu_torture_freelist)) {
67339 - atomic_inc(&n_rcu_torture_alloc_fail);
67340 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67341 spin_unlock_bh(&rcu_torture_lock);
67342 return NULL;
67343 }
67344 - atomic_inc(&n_rcu_torture_alloc);
67345 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67346 p = rcu_torture_freelist.next;
67347 list_del_init(p);
67348 spin_unlock_bh(&rcu_torture_lock);
67349 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
67350 static void
67351 rcu_torture_free(struct rcu_torture *p)
67352 {
67353 - atomic_inc(&n_rcu_torture_free);
67354 + atomic_inc_unchecked(&n_rcu_torture_free);
67355 spin_lock_bh(&rcu_torture_lock);
67356 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67357 spin_unlock_bh(&rcu_torture_lock);
67358 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
67359 i = rp->rtort_pipe_count;
67360 if (i > RCU_TORTURE_PIPE_LEN)
67361 i = RCU_TORTURE_PIPE_LEN;
67362 - atomic_inc(&rcu_torture_wcount[i]);
67363 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67364 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67365 rp->rtort_mbtest = 0;
67366 rcu_torture_free(rp);
67367 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67368 i = rp->rtort_pipe_count;
67369 if (i > RCU_TORTURE_PIPE_LEN)
67370 i = RCU_TORTURE_PIPE_LEN;
67371 - atomic_inc(&rcu_torture_wcount[i]);
67372 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67373 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67374 rp->rtort_mbtest = 0;
67375 list_del(&rp->rtort_free);
67376 @@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
67377 i = old_rp->rtort_pipe_count;
67378 if (i > RCU_TORTURE_PIPE_LEN)
67379 i = RCU_TORTURE_PIPE_LEN;
67380 - atomic_inc(&rcu_torture_wcount[i]);
67381 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67382 old_rp->rtort_pipe_count++;
67383 cur_ops->deferred_free(old_rp);
67384 }
67385 @@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
67386 return;
67387 }
67388 if (p->rtort_mbtest == 0)
67389 - atomic_inc(&n_rcu_torture_mberror);
67390 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67391 spin_lock(&rand_lock);
67392 cur_ops->read_delay(&rand);
67393 n_rcu_torture_timers++;
67394 @@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
67395 continue;
67396 }
67397 if (p->rtort_mbtest == 0)
67398 - atomic_inc(&n_rcu_torture_mberror);
67399 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67400 cur_ops->read_delay(&rand);
67401 preempt_disable();
67402 pipe_count = p->rtort_pipe_count;
67403 @@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
67404 rcu_torture_current,
67405 rcu_torture_current_version,
67406 list_empty(&rcu_torture_freelist),
67407 - atomic_read(&n_rcu_torture_alloc),
67408 - atomic_read(&n_rcu_torture_alloc_fail),
67409 - atomic_read(&n_rcu_torture_free),
67410 - atomic_read(&n_rcu_torture_mberror),
67411 + atomic_read_unchecked(&n_rcu_torture_alloc),
67412 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67413 + atomic_read_unchecked(&n_rcu_torture_free),
67414 + atomic_read_unchecked(&n_rcu_torture_mberror),
67415 n_rcu_torture_boost_ktrerror,
67416 n_rcu_torture_boost_rterror,
67417 n_rcu_torture_boost_failure,
67418 n_rcu_torture_boosts,
67419 n_rcu_torture_timers);
67420 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67421 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67422 n_rcu_torture_boost_ktrerror != 0 ||
67423 n_rcu_torture_boost_rterror != 0 ||
67424 n_rcu_torture_boost_failure != 0)
67425 @@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
67426 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67427 if (i > 1) {
67428 cnt += sprintf(&page[cnt], "!!! ");
67429 - atomic_inc(&n_rcu_torture_error);
67430 + atomic_inc_unchecked(&n_rcu_torture_error);
67431 WARN_ON_ONCE(1);
67432 }
67433 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67434 @@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
67435 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67436 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67437 cnt += sprintf(&page[cnt], " %d",
67438 - atomic_read(&rcu_torture_wcount[i]));
67439 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67440 }
67441 cnt += sprintf(&page[cnt], "\n");
67442 if (cur_ops->stats)
67443 @@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
67444
67445 if (cur_ops->cleanup)
67446 cur_ops->cleanup();
67447 - if (atomic_read(&n_rcu_torture_error))
67448 + if (atomic_read_unchecked(&n_rcu_torture_error))
67449 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67450 else
67451 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
67452 @@ -1465,17 +1465,17 @@ rcu_torture_init(void)
67453
67454 rcu_torture_current = NULL;
67455 rcu_torture_current_version = 0;
67456 - atomic_set(&n_rcu_torture_alloc, 0);
67457 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67458 - atomic_set(&n_rcu_torture_free, 0);
67459 - atomic_set(&n_rcu_torture_mberror, 0);
67460 - atomic_set(&n_rcu_torture_error, 0);
67461 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67462 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67463 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67464 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67465 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67466 n_rcu_torture_boost_ktrerror = 0;
67467 n_rcu_torture_boost_rterror = 0;
67468 n_rcu_torture_boost_failure = 0;
67469 n_rcu_torture_boosts = 0;
67470 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67471 - atomic_set(&rcu_torture_wcount[i], 0);
67472 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67473 for_each_possible_cpu(cpu) {
67474 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67475 per_cpu(rcu_torture_count, cpu)[i] = 0;
67476 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67477 index 6b76d81..7afc1b3 100644
67478 --- a/kernel/rcutree.c
67479 +++ b/kernel/rcutree.c
67480 @@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
67481 trace_rcu_dyntick("Start");
67482 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67483 smp_mb__before_atomic_inc(); /* See above. */
67484 - atomic_inc(&rdtp->dynticks);
67485 + atomic_inc_unchecked(&rdtp->dynticks);
67486 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67487 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67488 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67489 local_irq_restore(flags);
67490 }
67491
67492 @@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
67493 return;
67494 }
67495 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67496 - atomic_inc(&rdtp->dynticks);
67497 + atomic_inc_unchecked(&rdtp->dynticks);
67498 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67499 smp_mb__after_atomic_inc(); /* See above. */
67500 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67501 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67502 trace_rcu_dyntick("End");
67503 local_irq_restore(flags);
67504 }
67505 @@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
67506 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67507
67508 if (rdtp->dynticks_nmi_nesting == 0 &&
67509 - (atomic_read(&rdtp->dynticks) & 0x1))
67510 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67511 return;
67512 rdtp->dynticks_nmi_nesting++;
67513 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67514 - atomic_inc(&rdtp->dynticks);
67515 + atomic_inc_unchecked(&rdtp->dynticks);
67516 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67517 smp_mb__after_atomic_inc(); /* See above. */
67518 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67519 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67520 }
67521
67522 /**
67523 @@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
67524 return;
67525 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67526 smp_mb__before_atomic_inc(); /* See above. */
67527 - atomic_inc(&rdtp->dynticks);
67528 + atomic_inc_unchecked(&rdtp->dynticks);
67529 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67530 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67531 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67532 }
67533
67534 /**
67535 @@ -474,7 +474,7 @@ void rcu_irq_exit(void)
67536 */
67537 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67538 {
67539 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67540 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67541 return 0;
67542 }
67543
67544 @@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67545 unsigned int curr;
67546 unsigned int snap;
67547
67548 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
67549 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67550 snap = (unsigned int)rdp->dynticks_snap;
67551
67552 /*
67553 @@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67554 /*
67555 * Do RCU core processing for the current CPU.
67556 */
67557 -static void rcu_process_callbacks(struct softirq_action *unused)
67558 +static void rcu_process_callbacks(void)
67559 {
67560 trace_rcu_utilization("Start RCU core");
67561 __rcu_process_callbacks(&rcu_sched_state,
67562 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67563 index 849ce9e..74bc9de 100644
67564 --- a/kernel/rcutree.h
67565 +++ b/kernel/rcutree.h
67566 @@ -86,7 +86,7 @@
67567 struct rcu_dynticks {
67568 int dynticks_nesting; /* Track irq/process nesting level. */
67569 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67570 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
67571 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
67572 };
67573
67574 /* RCU's kthread states for tracing. */
67575 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67576 index 4b9b9f8..2326053 100644
67577 --- a/kernel/rcutree_plugin.h
67578 +++ b/kernel/rcutree_plugin.h
67579 @@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
67580
67581 /* Clean up and exit. */
67582 smp_mb(); /* ensure expedited GP seen before counter increment. */
67583 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67584 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67585 unlock_mb_ret:
67586 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67587 mb_ret:
67588 @@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
67589
67590 #else /* #ifndef CONFIG_SMP */
67591
67592 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67593 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67594 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67595 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67596
67597 static int synchronize_sched_expedited_cpu_stop(void *data)
67598 {
67599 @@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
67600 int firstsnap, s, snap, trycount = 0;
67601
67602 /* Note that atomic_inc_return() implies full memory barrier. */
67603 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67604 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67605 get_online_cpus();
67606
67607 /*
67608 @@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
67609 }
67610
67611 /* Check to see if someone else did our work for us. */
67612 - s = atomic_read(&sync_sched_expedited_done);
67613 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67614 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67615 smp_mb(); /* ensure test happens before caller kfree */
67616 return;
67617 @@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
67618 * grace period works for us.
67619 */
67620 get_online_cpus();
67621 - snap = atomic_read(&sync_sched_expedited_started) - 1;
67622 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
67623 smp_mb(); /* ensure read is before try_stop_cpus(). */
67624 }
67625
67626 @@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
67627 * than we did beat us to the punch.
67628 */
67629 do {
67630 - s = atomic_read(&sync_sched_expedited_done);
67631 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67632 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
67633 smp_mb(); /* ensure test happens before caller kfree */
67634 break;
67635 }
67636 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
67637 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
67638
67639 put_online_cpus();
67640 }
67641 @@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
67642 for_each_online_cpu(thatcpu) {
67643 if (thatcpu == cpu)
67644 continue;
67645 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
67646 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
67647 thatcpu).dynticks);
67648 smp_mb(); /* Order sampling of snap with end of grace period. */
67649 if ((snap & 0x1) != 0) {
67650 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
67651 index 9feffa4..54058df 100644
67652 --- a/kernel/rcutree_trace.c
67653 +++ b/kernel/rcutree_trace.c
67654 @@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
67655 rdp->qs_pending);
67656 #ifdef CONFIG_NO_HZ
67657 seq_printf(m, " dt=%d/%d/%d df=%lu",
67658 - atomic_read(&rdp->dynticks->dynticks),
67659 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67660 rdp->dynticks->dynticks_nesting,
67661 rdp->dynticks->dynticks_nmi_nesting,
67662 rdp->dynticks_fqs);
67663 @@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
67664 rdp->qs_pending);
67665 #ifdef CONFIG_NO_HZ
67666 seq_printf(m, ",%d,%d,%d,%lu",
67667 - atomic_read(&rdp->dynticks->dynticks),
67668 + atomic_read_unchecked(&rdp->dynticks->dynticks),
67669 rdp->dynticks->dynticks_nesting,
67670 rdp->dynticks->dynticks_nmi_nesting,
67671 rdp->dynticks_fqs);
67672 diff --git a/kernel/resource.c b/kernel/resource.c
67673 index 7640b3a..5879283 100644
67674 --- a/kernel/resource.c
67675 +++ b/kernel/resource.c
67676 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
67677
67678 static int __init ioresources_init(void)
67679 {
67680 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
67681 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67682 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
67683 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
67684 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67685 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
67686 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
67687 +#endif
67688 +#else
67689 proc_create("ioports", 0, NULL, &proc_ioports_operations);
67690 proc_create("iomem", 0, NULL, &proc_iomem_operations);
67691 +#endif
67692 return 0;
67693 }
67694 __initcall(ioresources_init);
67695 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
67696 index 3d9f31c..7fefc9e 100644
67697 --- a/kernel/rtmutex-tester.c
67698 +++ b/kernel/rtmutex-tester.c
67699 @@ -20,7 +20,7 @@
67700 #define MAX_RT_TEST_MUTEXES 8
67701
67702 static spinlock_t rttest_lock;
67703 -static atomic_t rttest_event;
67704 +static atomic_unchecked_t rttest_event;
67705
67706 struct test_thread_data {
67707 int opcode;
67708 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67709
67710 case RTTEST_LOCKCONT:
67711 td->mutexes[td->opdata] = 1;
67712 - td->event = atomic_add_return(1, &rttest_event);
67713 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67714 return 0;
67715
67716 case RTTEST_RESET:
67717 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67718 return 0;
67719
67720 case RTTEST_RESETEVENT:
67721 - atomic_set(&rttest_event, 0);
67722 + atomic_set_unchecked(&rttest_event, 0);
67723 return 0;
67724
67725 default:
67726 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67727 return ret;
67728
67729 td->mutexes[id] = 1;
67730 - td->event = atomic_add_return(1, &rttest_event);
67731 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67732 rt_mutex_lock(&mutexes[id]);
67733 - td->event = atomic_add_return(1, &rttest_event);
67734 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67735 td->mutexes[id] = 4;
67736 return 0;
67737
67738 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67739 return ret;
67740
67741 td->mutexes[id] = 1;
67742 - td->event = atomic_add_return(1, &rttest_event);
67743 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67744 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
67745 - td->event = atomic_add_return(1, &rttest_event);
67746 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67747 td->mutexes[id] = ret ? 0 : 4;
67748 return ret ? -EINTR : 0;
67749
67750 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
67751 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
67752 return ret;
67753
67754 - td->event = atomic_add_return(1, &rttest_event);
67755 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67756 rt_mutex_unlock(&mutexes[id]);
67757 - td->event = atomic_add_return(1, &rttest_event);
67758 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67759 td->mutexes[id] = 0;
67760 return 0;
67761
67762 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67763 break;
67764
67765 td->mutexes[dat] = 2;
67766 - td->event = atomic_add_return(1, &rttest_event);
67767 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67768 break;
67769
67770 default:
67771 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67772 return;
67773
67774 td->mutexes[dat] = 3;
67775 - td->event = atomic_add_return(1, &rttest_event);
67776 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67777 break;
67778
67779 case RTTEST_LOCKNOWAIT:
67780 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
67781 return;
67782
67783 td->mutexes[dat] = 1;
67784 - td->event = atomic_add_return(1, &rttest_event);
67785 + td->event = atomic_add_return_unchecked(1, &rttest_event);
67786 return;
67787
67788 default:
67789 diff --git a/kernel/sched.c b/kernel/sched.c
67790 index d6b149c..896cbb8 100644
67791 --- a/kernel/sched.c
67792 +++ b/kernel/sched.c
67793 @@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
67794 BUG(); /* the idle class will always have a runnable task */
67795 }
67796
67797 +#ifdef CONFIG_GRKERNSEC_SETXID
67798 +extern void gr_delayed_cred_worker(void);
67799 +static inline void gr_cred_schedule(void)
67800 +{
67801 + if (unlikely(current->delayed_cred))
67802 + gr_delayed_cred_worker();
67803 +}
67804 +#else
67805 +static inline void gr_cred_schedule(void)
67806 +{
67807 +}
67808 +#endif
67809 +
67810 /*
67811 * __schedule() is the main scheduler function.
67812 */
67813 @@ -4408,6 +4421,8 @@ need_resched:
67814
67815 schedule_debug(prev);
67816
67817 + gr_cred_schedule();
67818 +
67819 if (sched_feat(HRTICK))
67820 hrtick_clear(rq);
67821
67822 @@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
67823 /* convert nice value [19,-20] to rlimit style value [1,40] */
67824 int nice_rlim = 20 - nice;
67825
67826 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
67827 +
67828 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
67829 capable(CAP_SYS_NICE));
67830 }
67831 @@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
67832 if (nice > 19)
67833 nice = 19;
67834
67835 - if (increment < 0 && !can_nice(current, nice))
67836 + if (increment < 0 && (!can_nice(current, nice) ||
67837 + gr_handle_chroot_nice()))
67838 return -EPERM;
67839
67840 retval = security_task_setnice(current, nice);
67841 @@ -5288,6 +5306,7 @@ recheck:
67842 unsigned long rlim_rtprio =
67843 task_rlimit(p, RLIMIT_RTPRIO);
67844
67845 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
67846 /* can't set/change the rt policy */
67847 if (policy != p->policy && !rlim_rtprio)
67848 return -EPERM;
67849 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
67850 index 429242f..d7cca82 100644
67851 --- a/kernel/sched_autogroup.c
67852 +++ b/kernel/sched_autogroup.c
67853 @@ -7,7 +7,7 @@
67854
67855 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
67856 static struct autogroup autogroup_default;
67857 -static atomic_t autogroup_seq_nr;
67858 +static atomic_unchecked_t autogroup_seq_nr;
67859
67860 static void __init autogroup_init(struct task_struct *init_task)
67861 {
67862 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
67863
67864 kref_init(&ag->kref);
67865 init_rwsem(&ag->lock);
67866 - ag->id = atomic_inc_return(&autogroup_seq_nr);
67867 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
67868 ag->tg = tg;
67869 #ifdef CONFIG_RT_GROUP_SCHED
67870 /*
67871 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
67872 index 8a39fa3..34f3dbc 100644
67873 --- a/kernel/sched_fair.c
67874 +++ b/kernel/sched_fair.c
67875 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
67876 * run_rebalance_domains is triggered when needed from the scheduler tick.
67877 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
67878 */
67879 -static void run_rebalance_domains(struct softirq_action *h)
67880 +static void run_rebalance_domains(void)
67881 {
67882 int this_cpu = smp_processor_id();
67883 struct rq *this_rq = cpu_rq(this_cpu);
67884 diff --git a/kernel/signal.c b/kernel/signal.c
67885 index 2065515..aed2987 100644
67886 --- a/kernel/signal.c
67887 +++ b/kernel/signal.c
67888 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
67889
67890 int print_fatal_signals __read_mostly;
67891
67892 -static void __user *sig_handler(struct task_struct *t, int sig)
67893 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
67894 {
67895 return t->sighand->action[sig - 1].sa.sa_handler;
67896 }
67897
67898 -static int sig_handler_ignored(void __user *handler, int sig)
67899 +static int sig_handler_ignored(__sighandler_t handler, int sig)
67900 {
67901 /* Is it explicitly or implicitly ignored? */
67902 return handler == SIG_IGN ||
67903 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
67904 static int sig_task_ignored(struct task_struct *t, int sig,
67905 int from_ancestor_ns)
67906 {
67907 - void __user *handler;
67908 + __sighandler_t handler;
67909
67910 handler = sig_handler(t, sig);
67911
67912 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
67913 atomic_inc(&user->sigpending);
67914 rcu_read_unlock();
67915
67916 + if (!override_rlimit)
67917 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
67918 +
67919 if (override_rlimit ||
67920 atomic_read(&user->sigpending) <=
67921 task_rlimit(t, RLIMIT_SIGPENDING)) {
67922 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
67923
67924 int unhandled_signal(struct task_struct *tsk, int sig)
67925 {
67926 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
67927 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
67928 if (is_global_init(tsk))
67929 return 1;
67930 if (handler != SIG_IGN && handler != SIG_DFL)
67931 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
67932 }
67933 }
67934
67935 + /* allow glibc communication via tgkill to other threads in our
67936 + thread group */
67937 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
67938 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
67939 + && gr_handle_signal(t, sig))
67940 + return -EPERM;
67941 +
67942 return security_task_kill(t, info, sig, 0);
67943 }
67944
67945 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67946 return send_signal(sig, info, p, 1);
67947 }
67948
67949 -static int
67950 +int
67951 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67952 {
67953 return send_signal(sig, info, t, 0);
67954 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67955 unsigned long int flags;
67956 int ret, blocked, ignored;
67957 struct k_sigaction *action;
67958 + int is_unhandled = 0;
67959
67960 spin_lock_irqsave(&t->sighand->siglock, flags);
67961 action = &t->sighand->action[sig-1];
67962 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
67963 }
67964 if (action->sa.sa_handler == SIG_DFL)
67965 t->signal->flags &= ~SIGNAL_UNKILLABLE;
67966 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
67967 + is_unhandled = 1;
67968 ret = specific_send_sig_info(sig, info, t);
67969 spin_unlock_irqrestore(&t->sighand->siglock, flags);
67970
67971 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
67972 + normal operation */
67973 + if (is_unhandled) {
67974 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
67975 + gr_handle_crash(t, sig);
67976 + }
67977 +
67978 return ret;
67979 }
67980
67981 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
67982 ret = check_kill_permission(sig, info, p);
67983 rcu_read_unlock();
67984
67985 - if (!ret && sig)
67986 + if (!ret && sig) {
67987 ret = do_send_sig_info(sig, info, p, true);
67988 + if (!ret)
67989 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
67990 + }
67991
67992 return ret;
67993 }
67994 @@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
67995 int error = -ESRCH;
67996
67997 rcu_read_lock();
67998 - p = find_task_by_vpid(pid);
67999 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68000 + /* allow glibc communication via tgkill to other threads in our
68001 + thread group */
68002 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68003 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68004 + p = find_task_by_vpid_unrestricted(pid);
68005 + else
68006 +#endif
68007 + p = find_task_by_vpid(pid);
68008 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68009 error = check_kill_permission(sig, info, p);
68010 /*
68011 diff --git a/kernel/smp.c b/kernel/smp.c
68012 index db197d6..17aef0b 100644
68013 --- a/kernel/smp.c
68014 +++ b/kernel/smp.c
68015 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68016 }
68017 EXPORT_SYMBOL(smp_call_function);
68018
68019 -void ipi_call_lock(void)
68020 +void ipi_call_lock(void) __acquires(call_function.lock)
68021 {
68022 raw_spin_lock(&call_function.lock);
68023 }
68024
68025 -void ipi_call_unlock(void)
68026 +void ipi_call_unlock(void) __releases(call_function.lock)
68027 {
68028 raw_spin_unlock(&call_function.lock);
68029 }
68030
68031 -void ipi_call_lock_irq(void)
68032 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68033 {
68034 raw_spin_lock_irq(&call_function.lock);
68035 }
68036
68037 -void ipi_call_unlock_irq(void)
68038 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68039 {
68040 raw_spin_unlock_irq(&call_function.lock);
68041 }
68042 diff --git a/kernel/softirq.c b/kernel/softirq.c
68043 index 2c71d91..1021f81 100644
68044 --- a/kernel/softirq.c
68045 +++ b/kernel/softirq.c
68046 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68047
68048 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68049
68050 -char *softirq_to_name[NR_SOFTIRQS] = {
68051 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68052 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68053 "TASKLET", "SCHED", "HRTIMER", "RCU"
68054 };
68055 @@ -235,7 +235,7 @@ restart:
68056 kstat_incr_softirqs_this_cpu(vec_nr);
68057
68058 trace_softirq_entry(vec_nr);
68059 - h->action(h);
68060 + h->action();
68061 trace_softirq_exit(vec_nr);
68062 if (unlikely(prev_count != preempt_count())) {
68063 printk(KERN_ERR "huh, entered softirq %u %s %p"
68064 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
68065 local_irq_restore(flags);
68066 }
68067
68068 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68069 +void open_softirq(int nr, void (*action)(void))
68070 {
68071 - softirq_vec[nr].action = action;
68072 + pax_open_kernel();
68073 + *(void **)&softirq_vec[nr].action = action;
68074 + pax_close_kernel();
68075 }
68076
68077 /*
68078 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68079
68080 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68081
68082 -static void tasklet_action(struct softirq_action *a)
68083 +static void tasklet_action(void)
68084 {
68085 struct tasklet_struct *list;
68086
68087 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
68088 }
68089 }
68090
68091 -static void tasklet_hi_action(struct softirq_action *a)
68092 +static void tasklet_hi_action(void)
68093 {
68094 struct tasklet_struct *list;
68095
68096 diff --git a/kernel/sys.c b/kernel/sys.c
68097 index 481611f..0754d86 100644
68098 --- a/kernel/sys.c
68099 +++ b/kernel/sys.c
68100 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68101 error = -EACCES;
68102 goto out;
68103 }
68104 +
68105 + if (gr_handle_chroot_setpriority(p, niceval)) {
68106 + error = -EACCES;
68107 + goto out;
68108 + }
68109 +
68110 no_nice = security_task_setnice(p, niceval);
68111 if (no_nice) {
68112 error = no_nice;
68113 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68114 goto error;
68115 }
68116
68117 + if (gr_check_group_change(new->gid, new->egid, -1))
68118 + goto error;
68119 +
68120 if (rgid != (gid_t) -1 ||
68121 (egid != (gid_t) -1 && egid != old->gid))
68122 new->sgid = new->egid;
68123 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68124 old = current_cred();
68125
68126 retval = -EPERM;
68127 +
68128 + if (gr_check_group_change(gid, gid, gid))
68129 + goto error;
68130 +
68131 if (nsown_capable(CAP_SETGID))
68132 new->gid = new->egid = new->sgid = new->fsgid = gid;
68133 else if (gid == old->gid || gid == old->sgid)
68134 @@ -618,7 +631,7 @@ error:
68135 /*
68136 * change the user struct in a credentials set to match the new UID
68137 */
68138 -static int set_user(struct cred *new)
68139 +int set_user(struct cred *new)
68140 {
68141 struct user_struct *new_user;
68142
68143 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68144 goto error;
68145 }
68146
68147 + if (gr_check_user_change(new->uid, new->euid, -1))
68148 + goto error;
68149 +
68150 if (new->uid != old->uid) {
68151 retval = set_user(new);
68152 if (retval < 0)
68153 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68154 old = current_cred();
68155
68156 retval = -EPERM;
68157 +
68158 + if (gr_check_crash_uid(uid))
68159 + goto error;
68160 + if (gr_check_user_change(uid, uid, uid))
68161 + goto error;
68162 +
68163 if (nsown_capable(CAP_SETUID)) {
68164 new->suid = new->uid = uid;
68165 if (uid != old->uid) {
68166 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68167 goto error;
68168 }
68169
68170 + if (gr_check_user_change(ruid, euid, -1))
68171 + goto error;
68172 +
68173 if (ruid != (uid_t) -1) {
68174 new->uid = ruid;
68175 if (ruid != old->uid) {
68176 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68177 goto error;
68178 }
68179
68180 + if (gr_check_group_change(rgid, egid, -1))
68181 + goto error;
68182 +
68183 if (rgid != (gid_t) -1)
68184 new->gid = rgid;
68185 if (egid != (gid_t) -1)
68186 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68187 old = current_cred();
68188 old_fsuid = old->fsuid;
68189
68190 + if (gr_check_user_change(-1, -1, uid))
68191 + goto error;
68192 +
68193 if (uid == old->uid || uid == old->euid ||
68194 uid == old->suid || uid == old->fsuid ||
68195 nsown_capable(CAP_SETUID)) {
68196 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68197 }
68198 }
68199
68200 +error:
68201 abort_creds(new);
68202 return old_fsuid;
68203
68204 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68205 if (gid == old->gid || gid == old->egid ||
68206 gid == old->sgid || gid == old->fsgid ||
68207 nsown_capable(CAP_SETGID)) {
68208 + if (gr_check_group_change(-1, -1, gid))
68209 + goto error;
68210 +
68211 if (gid != old_fsgid) {
68212 new->fsgid = gid;
68213 goto change_okay;
68214 }
68215 }
68216
68217 +error:
68218 abort_creds(new);
68219 return old_fsgid;
68220
68221 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
68222 }
68223 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68224 snprintf(buf, len, "2.6.%u%s", v, rest);
68225 - ret = copy_to_user(release, buf, len);
68226 + if (len > sizeof(buf))
68227 + ret = -EFAULT;
68228 + else
68229 + ret = copy_to_user(release, buf, len);
68230 }
68231 return ret;
68232 }
68233 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68234 return -EFAULT;
68235
68236 down_read(&uts_sem);
68237 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68238 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68239 __OLD_UTS_LEN);
68240 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68241 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68242 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68243 __OLD_UTS_LEN);
68244 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68245 - error |= __copy_to_user(&name->release, &utsname()->release,
68246 + error |= __copy_to_user(name->release, &utsname()->release,
68247 __OLD_UTS_LEN);
68248 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68249 - error |= __copy_to_user(&name->version, &utsname()->version,
68250 + error |= __copy_to_user(name->version, &utsname()->version,
68251 __OLD_UTS_LEN);
68252 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68253 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68254 + error |= __copy_to_user(name->machine, &utsname()->machine,
68255 __OLD_UTS_LEN);
68256 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68257 up_read(&uts_sem);
68258 @@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68259 error = get_dumpable(me->mm);
68260 break;
68261 case PR_SET_DUMPABLE:
68262 - if (arg2 < 0 || arg2 > 1) {
68263 + if (arg2 > 1) {
68264 error = -EINVAL;
68265 break;
68266 }
68267 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68268 index ae27196..7506d69 100644
68269 --- a/kernel/sysctl.c
68270 +++ b/kernel/sysctl.c
68271 @@ -86,6 +86,13 @@
68272
68273
68274 #if defined(CONFIG_SYSCTL)
68275 +#include <linux/grsecurity.h>
68276 +#include <linux/grinternal.h>
68277 +
68278 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
68279 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
68280 + const int op);
68281 +extern int gr_handle_chroot_sysctl(const int op);
68282
68283 /* External variables not in a header file. */
68284 extern int sysctl_overcommit_memory;
68285 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68286 }
68287
68288 #endif
68289 +extern struct ctl_table grsecurity_table[];
68290
68291 static struct ctl_table root_table[];
68292 static struct ctl_table_root sysctl_table_root;
68293 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
68294 int sysctl_legacy_va_layout;
68295 #endif
68296
68297 +#ifdef CONFIG_PAX_SOFTMODE
68298 +static ctl_table pax_table[] = {
68299 + {
68300 + .procname = "softmode",
68301 + .data = &pax_softmode,
68302 + .maxlen = sizeof(unsigned int),
68303 + .mode = 0600,
68304 + .proc_handler = &proc_dointvec,
68305 + },
68306 +
68307 + { }
68308 +};
68309 +#endif
68310 +
68311 /* The default sysctl tables: */
68312
68313 static struct ctl_table root_table[] = {
68314 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
68315 #endif
68316
68317 static struct ctl_table kern_table[] = {
68318 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68319 + {
68320 + .procname = "grsecurity",
68321 + .mode = 0500,
68322 + .child = grsecurity_table,
68323 + },
68324 +#endif
68325 +
68326 +#ifdef CONFIG_PAX_SOFTMODE
68327 + {
68328 + .procname = "pax",
68329 + .mode = 0500,
68330 + .child = pax_table,
68331 + },
68332 +#endif
68333 +
68334 {
68335 .procname = "sched_child_runs_first",
68336 .data = &sysctl_sched_child_runs_first,
68337 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
68338 .data = &modprobe_path,
68339 .maxlen = KMOD_PATH_LEN,
68340 .mode = 0644,
68341 - .proc_handler = proc_dostring,
68342 + .proc_handler = proc_dostring_modpriv,
68343 },
68344 {
68345 .procname = "modules_disabled",
68346 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
68347 .extra1 = &zero,
68348 .extra2 = &one,
68349 },
68350 +#endif
68351 {
68352 .procname = "kptr_restrict",
68353 .data = &kptr_restrict,
68354 .maxlen = sizeof(int),
68355 .mode = 0644,
68356 .proc_handler = proc_dmesg_restrict,
68357 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68358 + .extra1 = &two,
68359 +#else
68360 .extra1 = &zero,
68361 +#endif
68362 .extra2 = &two,
68363 },
68364 -#endif
68365 {
68366 .procname = "ngroups_max",
68367 .data = &ngroups_max,
68368 @@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
68369 .proc_handler = proc_dointvec_minmax,
68370 .extra1 = &zero,
68371 },
68372 + {
68373 + .procname = "heap_stack_gap",
68374 + .data = &sysctl_heap_stack_gap,
68375 + .maxlen = sizeof(sysctl_heap_stack_gap),
68376 + .mode = 0644,
68377 + .proc_handler = proc_doulongvec_minmax,
68378 + },
68379 #else
68380 {
68381 .procname = "nr_trim_pages",
68382 @@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
68383 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
68384 {
68385 int mode;
68386 + int error;
68387 +
68388 + if (table->parent != NULL && table->parent->procname != NULL &&
68389 + table->procname != NULL &&
68390 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
68391 + return -EACCES;
68392 + if (gr_handle_chroot_sysctl(op))
68393 + return -EACCES;
68394 + error = gr_handle_sysctl(table, op);
68395 + if (error)
68396 + return error;
68397
68398 if (root->permissions)
68399 mode = root->permissions(root, current->nsproxy, table);
68400 @@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
68401 buffer, lenp, ppos);
68402 }
68403
68404 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68405 + void __user *buffer, size_t *lenp, loff_t *ppos)
68406 +{
68407 + if (write && !capable(CAP_SYS_MODULE))
68408 + return -EPERM;
68409 +
68410 + return _proc_do_string(table->data, table->maxlen, write,
68411 + buffer, lenp, ppos);
68412 +}
68413 +
68414 static size_t proc_skip_spaces(char **buf)
68415 {
68416 size_t ret;
68417 @@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68418 len = strlen(tmp);
68419 if (len > *size)
68420 len = *size;
68421 + if (len > sizeof(tmp))
68422 + len = sizeof(tmp);
68423 if (copy_to_user(*buf, tmp, len))
68424 return -EFAULT;
68425 *size -= len;
68426 @@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68427 *i = val;
68428 } else {
68429 val = convdiv * (*i) / convmul;
68430 - if (!first)
68431 + if (!first) {
68432 err = proc_put_char(&buffer, &left, '\t');
68433 + if (err)
68434 + break;
68435 + }
68436 err = proc_put_long(&buffer, &left, val, false);
68437 if (err)
68438 break;
68439 @@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
68440 return -ENOSYS;
68441 }
68442
68443 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68444 + void __user *buffer, size_t *lenp, loff_t *ppos)
68445 +{
68446 + return -ENOSYS;
68447 +}
68448 +
68449 int proc_dointvec(struct ctl_table *table, int write,
68450 void __user *buffer, size_t *lenp, loff_t *ppos)
68451 {
68452 @@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68453 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68454 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68455 EXPORT_SYMBOL(proc_dostring);
68456 +EXPORT_SYMBOL(proc_dostring_modpriv);
68457 EXPORT_SYMBOL(proc_doulongvec_minmax);
68458 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68459 EXPORT_SYMBOL(register_sysctl_table);
68460 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68461 index a650694..aaeeb20 100644
68462 --- a/kernel/sysctl_binary.c
68463 +++ b/kernel/sysctl_binary.c
68464 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68465 int i;
68466
68467 set_fs(KERNEL_DS);
68468 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68469 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68470 set_fs(old_fs);
68471 if (result < 0)
68472 goto out_kfree;
68473 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68474 }
68475
68476 set_fs(KERNEL_DS);
68477 - result = vfs_write(file, buffer, str - buffer, &pos);
68478 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68479 set_fs(old_fs);
68480 if (result < 0)
68481 goto out_kfree;
68482 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68483 int i;
68484
68485 set_fs(KERNEL_DS);
68486 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68487 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68488 set_fs(old_fs);
68489 if (result < 0)
68490 goto out_kfree;
68491 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68492 }
68493
68494 set_fs(KERNEL_DS);
68495 - result = vfs_write(file, buffer, str - buffer, &pos);
68496 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68497 set_fs(old_fs);
68498 if (result < 0)
68499 goto out_kfree;
68500 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68501 int i;
68502
68503 set_fs(KERNEL_DS);
68504 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68505 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68506 set_fs(old_fs);
68507 if (result < 0)
68508 goto out;
68509 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68510 __le16 dnaddr;
68511
68512 set_fs(KERNEL_DS);
68513 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68514 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68515 set_fs(old_fs);
68516 if (result < 0)
68517 goto out;
68518 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68519 le16_to_cpu(dnaddr) & 0x3ff);
68520
68521 set_fs(KERNEL_DS);
68522 - result = vfs_write(file, buf, len, &pos);
68523 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68524 set_fs(old_fs);
68525 if (result < 0)
68526 goto out;
68527 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
68528 index 362da65..ab8ef8c 100644
68529 --- a/kernel/sysctl_check.c
68530 +++ b/kernel/sysctl_check.c
68531 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
68532 set_fail(&fail, table, "Directory with extra2");
68533 } else {
68534 if ((table->proc_handler == proc_dostring) ||
68535 + (table->proc_handler == proc_dostring_modpriv) ||
68536 (table->proc_handler == proc_dointvec) ||
68537 (table->proc_handler == proc_dointvec_minmax) ||
68538 (table->proc_handler == proc_dointvec_jiffies) ||
68539 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68540 index e660464..c8b9e67 100644
68541 --- a/kernel/taskstats.c
68542 +++ b/kernel/taskstats.c
68543 @@ -27,9 +27,12 @@
68544 #include <linux/cgroup.h>
68545 #include <linux/fs.h>
68546 #include <linux/file.h>
68547 +#include <linux/grsecurity.h>
68548 #include <net/genetlink.h>
68549 #include <linux/atomic.h>
68550
68551 +extern int gr_is_taskstats_denied(int pid);
68552 +
68553 /*
68554 * Maximum length of a cpumask that can be specified in
68555 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68556 @@ -556,6 +559,9 @@ err:
68557
68558 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68559 {
68560 + if (gr_is_taskstats_denied(current->pid))
68561 + return -EACCES;
68562 +
68563 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68564 return cmd_attr_register_cpumask(info);
68565 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68566 diff --git a/kernel/time.c b/kernel/time.c
68567 index 73e416d..cfc6f69 100644
68568 --- a/kernel/time.c
68569 +++ b/kernel/time.c
68570 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68571 return error;
68572
68573 if (tz) {
68574 + /* we log in do_settimeofday called below, so don't log twice
68575 + */
68576 + if (!tv)
68577 + gr_log_timechange();
68578 +
68579 /* SMP safe, global irq locking makes it work. */
68580 sys_tz = *tz;
68581 update_vsyscall_tz();
68582 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68583 index 8a46f5d..bbe6f9c 100644
68584 --- a/kernel/time/alarmtimer.c
68585 +++ b/kernel/time/alarmtimer.c
68586 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
68587 struct platform_device *pdev;
68588 int error = 0;
68589 int i;
68590 - struct k_clock alarm_clock = {
68591 + static struct k_clock alarm_clock = {
68592 .clock_getres = alarm_clock_getres,
68593 .clock_get = alarm_clock_get,
68594 .timer_create = alarm_timer_create,
68595 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68596 index fd4a7b1..fae5c2a 100644
68597 --- a/kernel/time/tick-broadcast.c
68598 +++ b/kernel/time/tick-broadcast.c
68599 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68600 * then clear the broadcast bit.
68601 */
68602 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68603 - int cpu = smp_processor_id();
68604 + cpu = smp_processor_id();
68605
68606 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68607 tick_broadcast_clear_oneshot(cpu);
68608 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68609 index 2378413..be455fd 100644
68610 --- a/kernel/time/timekeeping.c
68611 +++ b/kernel/time/timekeeping.c
68612 @@ -14,6 +14,7 @@
68613 #include <linux/init.h>
68614 #include <linux/mm.h>
68615 #include <linux/sched.h>
68616 +#include <linux/grsecurity.h>
68617 #include <linux/syscore_ops.h>
68618 #include <linux/clocksource.h>
68619 #include <linux/jiffies.h>
68620 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
68621 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68622 return -EINVAL;
68623
68624 + gr_log_timechange();
68625 +
68626 write_seqlock_irqsave(&xtime_lock, flags);
68627
68628 timekeeping_forward_now();
68629 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68630 index 3258455..f35227d 100644
68631 --- a/kernel/time/timer_list.c
68632 +++ b/kernel/time/timer_list.c
68633 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
68634
68635 static void print_name_offset(struct seq_file *m, void *sym)
68636 {
68637 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68638 + SEQ_printf(m, "<%p>", NULL);
68639 +#else
68640 char symname[KSYM_NAME_LEN];
68641
68642 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
68643 SEQ_printf(m, "<%pK>", sym);
68644 else
68645 SEQ_printf(m, "%s", symname);
68646 +#endif
68647 }
68648
68649 static void
68650 @@ -112,7 +116,11 @@ next_one:
68651 static void
68652 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
68653 {
68654 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68655 + SEQ_printf(m, " .base: %p\n", NULL);
68656 +#else
68657 SEQ_printf(m, " .base: %pK\n", base);
68658 +#endif
68659 SEQ_printf(m, " .index: %d\n",
68660 base->index);
68661 SEQ_printf(m, " .resolution: %Lu nsecs\n",
68662 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
68663 {
68664 struct proc_dir_entry *pe;
68665
68666 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68667 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
68668 +#else
68669 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
68670 +#endif
68671 if (!pe)
68672 return -ENOMEM;
68673 return 0;
68674 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
68675 index 0b537f2..9e71eca 100644
68676 --- a/kernel/time/timer_stats.c
68677 +++ b/kernel/time/timer_stats.c
68678 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
68679 static unsigned long nr_entries;
68680 static struct entry entries[MAX_ENTRIES];
68681
68682 -static atomic_t overflow_count;
68683 +static atomic_unchecked_t overflow_count;
68684
68685 /*
68686 * The entries are in a hash-table, for fast lookup:
68687 @@ -140,7 +140,7 @@ static void reset_entries(void)
68688 nr_entries = 0;
68689 memset(entries, 0, sizeof(entries));
68690 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
68691 - atomic_set(&overflow_count, 0);
68692 + atomic_set_unchecked(&overflow_count, 0);
68693 }
68694
68695 static struct entry *alloc_entry(void)
68696 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68697 if (likely(entry))
68698 entry->count++;
68699 else
68700 - atomic_inc(&overflow_count);
68701 + atomic_inc_unchecked(&overflow_count);
68702
68703 out_unlock:
68704 raw_spin_unlock_irqrestore(lock, flags);
68705 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
68706
68707 static void print_name_offset(struct seq_file *m, unsigned long addr)
68708 {
68709 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68710 + seq_printf(m, "<%p>", NULL);
68711 +#else
68712 char symname[KSYM_NAME_LEN];
68713
68714 if (lookup_symbol_name(addr, symname) < 0)
68715 seq_printf(m, "<%p>", (void *)addr);
68716 else
68717 seq_printf(m, "%s", symname);
68718 +#endif
68719 }
68720
68721 static int tstats_show(struct seq_file *m, void *v)
68722 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
68723
68724 seq_puts(m, "Timer Stats Version: v0.2\n");
68725 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
68726 - if (atomic_read(&overflow_count))
68727 + if (atomic_read_unchecked(&overflow_count))
68728 seq_printf(m, "Overflow: %d entries\n",
68729 - atomic_read(&overflow_count));
68730 + atomic_read_unchecked(&overflow_count));
68731
68732 for (i = 0; i < nr_entries; i++) {
68733 entry = entries + i;
68734 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
68735 {
68736 struct proc_dir_entry *pe;
68737
68738 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68739 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
68740 +#else
68741 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
68742 +#endif
68743 if (!pe)
68744 return -ENOMEM;
68745 return 0;
68746 diff --git a/kernel/timer.c b/kernel/timer.c
68747 index 9c3c62b..441690e 100644
68748 --- a/kernel/timer.c
68749 +++ b/kernel/timer.c
68750 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
68751 /*
68752 * This function runs timers and the timer-tq in bottom half context.
68753 */
68754 -static void run_timer_softirq(struct softirq_action *h)
68755 +static void run_timer_softirq(void)
68756 {
68757 struct tvec_base *base = __this_cpu_read(tvec_bases);
68758
68759 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
68760 index 16fc34a..efd8bb8 100644
68761 --- a/kernel/trace/blktrace.c
68762 +++ b/kernel/trace/blktrace.c
68763 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
68764 struct blk_trace *bt = filp->private_data;
68765 char buf[16];
68766
68767 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
68768 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
68769
68770 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
68771 }
68772 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
68773 return 1;
68774
68775 bt = buf->chan->private_data;
68776 - atomic_inc(&bt->dropped);
68777 + atomic_inc_unchecked(&bt->dropped);
68778 return 0;
68779 }
68780
68781 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
68782
68783 bt->dir = dir;
68784 bt->dev = dev;
68785 - atomic_set(&bt->dropped, 0);
68786 + atomic_set_unchecked(&bt->dropped, 0);
68787
68788 ret = -EIO;
68789 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
68790 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
68791 index 25b4f4d..6f4772d 100644
68792 --- a/kernel/trace/ftrace.c
68793 +++ b/kernel/trace/ftrace.c
68794 @@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
68795 if (unlikely(ftrace_disabled))
68796 return 0;
68797
68798 + ret = ftrace_arch_code_modify_prepare();
68799 + FTRACE_WARN_ON(ret);
68800 + if (ret)
68801 + return 0;
68802 +
68803 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
68804 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
68805 if (ret) {
68806 ftrace_bug(ret, ip);
68807 - return 0;
68808 }
68809 - return 1;
68810 + return ret ? 0 : 1;
68811 }
68812
68813 /*
68814 @@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
68815
68816 int
68817 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
68818 - void *data)
68819 + void *data)
68820 {
68821 struct ftrace_func_probe *entry;
68822 struct ftrace_page *pg;
68823 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
68824 index f2bd275..adaf3a2 100644
68825 --- a/kernel/trace/trace.c
68826 +++ b/kernel/trace/trace.c
68827 @@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
68828 };
68829 #endif
68830
68831 -static struct dentry *d_tracer;
68832 -
68833 struct dentry *tracing_init_dentry(void)
68834 {
68835 + static struct dentry *d_tracer;
68836 static int once;
68837
68838 if (d_tracer)
68839 @@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
68840 return d_tracer;
68841 }
68842
68843 -static struct dentry *d_percpu;
68844 -
68845 struct dentry *tracing_dentry_percpu(void)
68846 {
68847 + static struct dentry *d_percpu;
68848 static int once;
68849 struct dentry *d_tracer;
68850
68851 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
68852 index c212a7f..7b02394 100644
68853 --- a/kernel/trace/trace_events.c
68854 +++ b/kernel/trace/trace_events.c
68855 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
68856 struct ftrace_module_file_ops {
68857 struct list_head list;
68858 struct module *mod;
68859 - struct file_operations id;
68860 - struct file_operations enable;
68861 - struct file_operations format;
68862 - struct file_operations filter;
68863 };
68864
68865 static struct ftrace_module_file_ops *
68866 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
68867
68868 file_ops->mod = mod;
68869
68870 - file_ops->id = ftrace_event_id_fops;
68871 - file_ops->id.owner = mod;
68872 -
68873 - file_ops->enable = ftrace_enable_fops;
68874 - file_ops->enable.owner = mod;
68875 -
68876 - file_ops->filter = ftrace_event_filter_fops;
68877 - file_ops->filter.owner = mod;
68878 -
68879 - file_ops->format = ftrace_event_format_fops;
68880 - file_ops->format.owner = mod;
68881 + pax_open_kernel();
68882 + *(void **)&mod->trace_id.owner = mod;
68883 + *(void **)&mod->trace_enable.owner = mod;
68884 + *(void **)&mod->trace_filter.owner = mod;
68885 + *(void **)&mod->trace_format.owner = mod;
68886 + pax_close_kernel();
68887
68888 list_add(&file_ops->list, &ftrace_module_file_list);
68889
68890 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
68891
68892 for_each_event(call, start, end) {
68893 __trace_add_event_call(*call, mod,
68894 - &file_ops->id, &file_ops->enable,
68895 - &file_ops->filter, &file_ops->format);
68896 + &mod->trace_id, &mod->trace_enable,
68897 + &mod->trace_filter, &mod->trace_format);
68898 }
68899 }
68900
68901 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
68902 index 00d527c..7c5b1a3 100644
68903 --- a/kernel/trace/trace_kprobe.c
68904 +++ b/kernel/trace/trace_kprobe.c
68905 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68906 long ret;
68907 int maxlen = get_rloc_len(*(u32 *)dest);
68908 u8 *dst = get_rloc_data(dest);
68909 - u8 *src = addr;
68910 + const u8 __user *src = (const u8 __force_user *)addr;
68911 mm_segment_t old_fs = get_fs();
68912 if (!maxlen)
68913 return;
68914 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68915 pagefault_disable();
68916 do
68917 ret = __copy_from_user_inatomic(dst++, src++, 1);
68918 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
68919 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
68920 dst[-1] = '\0';
68921 pagefault_enable();
68922 set_fs(old_fs);
68923 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
68924 ((u8 *)get_rloc_data(dest))[0] = '\0';
68925 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
68926 } else
68927 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
68928 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
68929 get_rloc_offs(*(u32 *)dest));
68930 }
68931 /* Return the length of string -- including null terminal byte */
68932 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
68933 set_fs(KERNEL_DS);
68934 pagefault_disable();
68935 do {
68936 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
68937 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
68938 len++;
68939 } while (c && ret == 0 && len < MAX_STRING_SIZE);
68940 pagefault_enable();
68941 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
68942 index fd3c8aa..5f324a6 100644
68943 --- a/kernel/trace/trace_mmiotrace.c
68944 +++ b/kernel/trace/trace_mmiotrace.c
68945 @@ -24,7 +24,7 @@ struct header_iter {
68946 static struct trace_array *mmio_trace_array;
68947 static bool overrun_detected;
68948 static unsigned long prev_overruns;
68949 -static atomic_t dropped_count;
68950 +static atomic_unchecked_t dropped_count;
68951
68952 static void mmio_reset_data(struct trace_array *tr)
68953 {
68954 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
68955
68956 static unsigned long count_overruns(struct trace_iterator *iter)
68957 {
68958 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
68959 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
68960 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
68961
68962 if (over > prev_overruns)
68963 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
68964 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
68965 sizeof(*entry), 0, pc);
68966 if (!event) {
68967 - atomic_inc(&dropped_count);
68968 + atomic_inc_unchecked(&dropped_count);
68969 return;
68970 }
68971 entry = ring_buffer_event_data(event);
68972 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
68973 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
68974 sizeof(*entry), 0, pc);
68975 if (!event) {
68976 - atomic_inc(&dropped_count);
68977 + atomic_inc_unchecked(&dropped_count);
68978 return;
68979 }
68980 entry = ring_buffer_event_data(event);
68981 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
68982 index 5199930..26c73a0 100644
68983 --- a/kernel/trace/trace_output.c
68984 +++ b/kernel/trace/trace_output.c
68985 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
68986
68987 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
68988 if (!IS_ERR(p)) {
68989 - p = mangle_path(s->buffer + s->len, p, "\n");
68990 + p = mangle_path(s->buffer + s->len, p, "\n\\");
68991 if (p) {
68992 s->len = p - s->buffer;
68993 return 1;
68994 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
68995 index 77575b3..6e623d1 100644
68996 --- a/kernel/trace/trace_stack.c
68997 +++ b/kernel/trace/trace_stack.c
68998 @@ -50,7 +50,7 @@ static inline void check_stack(void)
68999 return;
69000
69001 /* we do not handle interrupt stacks yet */
69002 - if (!object_is_on_stack(&this_size))
69003 + if (!object_starts_on_stack(&this_size))
69004 return;
69005
69006 local_irq_save(flags);
69007 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69008 index 209b379..7f76423 100644
69009 --- a/kernel/trace/trace_workqueue.c
69010 +++ b/kernel/trace/trace_workqueue.c
69011 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69012 int cpu;
69013 pid_t pid;
69014 /* Can be inserted from interrupt or user context, need to be atomic */
69015 - atomic_t inserted;
69016 + atomic_unchecked_t inserted;
69017 /*
69018 * Don't need to be atomic, works are serialized in a single workqueue thread
69019 * on a single CPU.
69020 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69021 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69022 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69023 if (node->pid == wq_thread->pid) {
69024 - atomic_inc(&node->inserted);
69025 + atomic_inc_unchecked(&node->inserted);
69026 goto found;
69027 }
69028 }
69029 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69030 tsk = get_pid_task(pid, PIDTYPE_PID);
69031 if (tsk) {
69032 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69033 - atomic_read(&cws->inserted), cws->executed,
69034 + atomic_read_unchecked(&cws->inserted), cws->executed,
69035 tsk->comm);
69036 put_task_struct(tsk);
69037 }
69038 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69039 index 82928f5..92da771 100644
69040 --- a/lib/Kconfig.debug
69041 +++ b/lib/Kconfig.debug
69042 @@ -1103,6 +1103,7 @@ config LATENCYTOP
69043 depends on DEBUG_KERNEL
69044 depends on STACKTRACE_SUPPORT
69045 depends on PROC_FS
69046 + depends on !GRKERNSEC_HIDESYM
69047 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69048 select KALLSYMS
69049 select KALLSYMS_ALL
69050 diff --git a/lib/bitmap.c b/lib/bitmap.c
69051 index 0d4a127..33a06c7 100644
69052 --- a/lib/bitmap.c
69053 +++ b/lib/bitmap.c
69054 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69055 {
69056 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69057 u32 chunk;
69058 - const char __user __force *ubuf = (const char __user __force *)buf;
69059 + const char __user *ubuf = (const char __force_user *)buf;
69060
69061 bitmap_zero(maskp, nmaskbits);
69062
69063 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
69064 {
69065 if (!access_ok(VERIFY_READ, ubuf, ulen))
69066 return -EFAULT;
69067 - return __bitmap_parse((const char __force *)ubuf,
69068 + return __bitmap_parse((const char __force_kernel *)ubuf,
69069 ulen, 1, maskp, nmaskbits);
69070
69071 }
69072 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69073 {
69074 unsigned a, b;
69075 int c, old_c, totaldigits;
69076 - const char __user __force *ubuf = (const char __user __force *)buf;
69077 + const char __user *ubuf = (const char __force_user *)buf;
69078 int exp_digit, in_range;
69079
69080 totaldigits = c = 0;
69081 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69082 {
69083 if (!access_ok(VERIFY_READ, ubuf, ulen))
69084 return -EFAULT;
69085 - return __bitmap_parselist((const char __force *)ubuf,
69086 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69087 ulen, 1, maskp, nmaskbits);
69088 }
69089 EXPORT_SYMBOL(bitmap_parselist_user);
69090 diff --git a/lib/bug.c b/lib/bug.c
69091 index 1955209..cbbb2ad 100644
69092 --- a/lib/bug.c
69093 +++ b/lib/bug.c
69094 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69095 return BUG_TRAP_TYPE_NONE;
69096
69097 bug = find_bug(bugaddr);
69098 + if (!bug)
69099 + return BUG_TRAP_TYPE_NONE;
69100
69101 file = NULL;
69102 line = 0;
69103 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69104 index a78b7c6..2c73084 100644
69105 --- a/lib/debugobjects.c
69106 +++ b/lib/debugobjects.c
69107 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69108 if (limit > 4)
69109 return;
69110
69111 - is_on_stack = object_is_on_stack(addr);
69112 + is_on_stack = object_starts_on_stack(addr);
69113 if (is_on_stack == onstack)
69114 return;
69115
69116 diff --git a/lib/devres.c b/lib/devres.c
69117 index 7c0e953..f642b5c 100644
69118 --- a/lib/devres.c
69119 +++ b/lib/devres.c
69120 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69121 void devm_iounmap(struct device *dev, void __iomem *addr)
69122 {
69123 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69124 - (void *)addr));
69125 + (void __force *)addr));
69126 iounmap(addr);
69127 }
69128 EXPORT_SYMBOL(devm_iounmap);
69129 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69130 {
69131 ioport_unmap(addr);
69132 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69133 - devm_ioport_map_match, (void *)addr));
69134 + devm_ioport_map_match, (void __force *)addr));
69135 }
69136 EXPORT_SYMBOL(devm_ioport_unmap);
69137
69138 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69139 index fea790a..ebb0e82 100644
69140 --- a/lib/dma-debug.c
69141 +++ b/lib/dma-debug.c
69142 @@ -925,7 +925,7 @@ out:
69143
69144 static void check_for_stack(struct device *dev, void *addr)
69145 {
69146 - if (object_is_on_stack(addr))
69147 + if (object_starts_on_stack(addr))
69148 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69149 "stack [addr=%p]\n", addr);
69150 }
69151 diff --git a/lib/extable.c b/lib/extable.c
69152 index 4cac81e..63e9b8f 100644
69153 --- a/lib/extable.c
69154 +++ b/lib/extable.c
69155 @@ -13,6 +13,7 @@
69156 #include <linux/init.h>
69157 #include <linux/sort.h>
69158 #include <asm/uaccess.h>
69159 +#include <asm/pgtable.h>
69160
69161 #ifndef ARCH_HAS_SORT_EXTABLE
69162 /*
69163 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69164 void sort_extable(struct exception_table_entry *start,
69165 struct exception_table_entry *finish)
69166 {
69167 + pax_open_kernel();
69168 sort(start, finish - start, sizeof(struct exception_table_entry),
69169 cmp_ex, NULL);
69170 + pax_close_kernel();
69171 }
69172
69173 #ifdef CONFIG_MODULES
69174 diff --git a/lib/inflate.c b/lib/inflate.c
69175 index 013a761..c28f3fc 100644
69176 --- a/lib/inflate.c
69177 +++ b/lib/inflate.c
69178 @@ -269,7 +269,7 @@ static void free(void *where)
69179 malloc_ptr = free_mem_ptr;
69180 }
69181 #else
69182 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69183 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69184 #define free(a) kfree(a)
69185 #endif
69186
69187 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69188 index bd2bea9..6b3c95e 100644
69189 --- a/lib/is_single_threaded.c
69190 +++ b/lib/is_single_threaded.c
69191 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69192 struct task_struct *p, *t;
69193 bool ret;
69194
69195 + if (!mm)
69196 + return true;
69197 +
69198 if (atomic_read(&task->signal->live) != 1)
69199 return false;
69200
69201 diff --git a/lib/kref.c b/lib/kref.c
69202 index 3efb882..8492f4c 100644
69203 --- a/lib/kref.c
69204 +++ b/lib/kref.c
69205 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
69206 */
69207 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
69208 {
69209 - WARN_ON(release == NULL);
69210 + BUG_ON(release == NULL);
69211 WARN_ON(release == (void (*)(struct kref *))kfree);
69212
69213 if (atomic_dec_and_test(&kref->refcount)) {
69214 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69215 index d9df745..e73c2fe 100644
69216 --- a/lib/radix-tree.c
69217 +++ b/lib/radix-tree.c
69218 @@ -80,7 +80,7 @@ struct radix_tree_preload {
69219 int nr;
69220 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69221 };
69222 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69223 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69224
69225 static inline void *ptr_to_indirect(void *ptr)
69226 {
69227 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69228 index 993599e..f1dbc14 100644
69229 --- a/lib/vsprintf.c
69230 +++ b/lib/vsprintf.c
69231 @@ -16,6 +16,9 @@
69232 * - scnprintf and vscnprintf
69233 */
69234
69235 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69236 +#define __INCLUDED_BY_HIDESYM 1
69237 +#endif
69238 #include <stdarg.h>
69239 #include <linux/module.h>
69240 #include <linux/types.h>
69241 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69242 char sym[KSYM_SYMBOL_LEN];
69243 if (ext == 'B')
69244 sprint_backtrace(sym, value);
69245 - else if (ext != 'f' && ext != 's')
69246 + else if (ext != 'f' && ext != 's' && ext != 'a')
69247 sprint_symbol(sym, value);
69248 else
69249 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69250 @@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
69251 return string(buf, end, uuid, spec);
69252 }
69253
69254 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69255 +int kptr_restrict __read_mostly = 2;
69256 +#else
69257 int kptr_restrict __read_mostly;
69258 +#endif
69259
69260 /*
69261 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69262 @@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
69263 * - 'S' For symbolic direct pointers with offset
69264 * - 's' For symbolic direct pointers without offset
69265 * - 'B' For backtraced symbolic direct pointers with offset
69266 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69267 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69268 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69269 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69270 * - 'M' For a 6-byte MAC address, it prints the address in the
69271 @@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69272 {
69273 if (!ptr && *fmt != 'K') {
69274 /*
69275 - * Print (null) with the same width as a pointer so it makes
69276 + * Print (nil) with the same width as a pointer so it makes
69277 * tabular output look nice.
69278 */
69279 if (spec.field_width == -1)
69280 spec.field_width = 2 * sizeof(void *);
69281 - return string(buf, end, "(null)", spec);
69282 + return string(buf, end, "(nil)", spec);
69283 }
69284
69285 switch (*fmt) {
69286 @@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69287 /* Fallthrough */
69288 case 'S':
69289 case 's':
69290 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69291 + break;
69292 +#else
69293 + return symbol_string(buf, end, ptr, spec, *fmt);
69294 +#endif
69295 + case 'A':
69296 + case 'a':
69297 case 'B':
69298 return symbol_string(buf, end, ptr, spec, *fmt);
69299 case 'R':
69300 @@ -878,9 +894,15 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69301 case 'U':
69302 return uuid_string(buf, end, ptr, spec, fmt);
69303 case 'V':
69304 - return buf + vsnprintf(buf, end > buf ? end - buf : 0,
69305 - ((struct va_format *)ptr)->fmt,
69306 - *(((struct va_format *)ptr)->va));
69307 + {
69308 + va_list va;
69309 +
69310 + va_copy(va, *((struct va_format *)ptr)->va);
69311 + buf += vsnprintf(buf, end > buf ? end - buf : 0,
69312 + ((struct va_format *)ptr)->fmt, va);
69313 + va_end(va);
69314 + return buf;
69315 + }
69316 case 'K':
69317 /*
69318 * %pK cannot be used in IRQ context because its test
69319 @@ -1608,11 +1630,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69320 typeof(type) value; \
69321 if (sizeof(type) == 8) { \
69322 args = PTR_ALIGN(args, sizeof(u32)); \
69323 - *(u32 *)&value = *(u32 *)args; \
69324 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69325 + *(u32 *)&value = *(const u32 *)args; \
69326 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69327 } else { \
69328 args = PTR_ALIGN(args, sizeof(type)); \
69329 - value = *(typeof(type) *)args; \
69330 + value = *(const typeof(type) *)args; \
69331 } \
69332 args += sizeof(type); \
69333 value; \
69334 @@ -1675,7 +1697,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69335 case FORMAT_TYPE_STR: {
69336 const char *str_arg = args;
69337 args += strlen(str_arg) + 1;
69338 - str = string(str, end, (char *)str_arg, spec);
69339 + str = string(str, end, str_arg, spec);
69340 break;
69341 }
69342
69343 diff --git a/localversion-grsec b/localversion-grsec
69344 new file mode 100644
69345 index 0000000..7cd6065
69346 --- /dev/null
69347 +++ b/localversion-grsec
69348 @@ -0,0 +1 @@
69349 +-grsec
69350 diff --git a/mm/Kconfig b/mm/Kconfig
69351 index 011b110..b492af2 100644
69352 --- a/mm/Kconfig
69353 +++ b/mm/Kconfig
69354 @@ -241,10 +241,10 @@ config KSM
69355 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69356
69357 config DEFAULT_MMAP_MIN_ADDR
69358 - int "Low address space to protect from user allocation"
69359 + int "Low address space to protect from user allocation"
69360 depends on MMU
69361 - default 4096
69362 - help
69363 + default 65536
69364 + help
69365 This is the portion of low virtual memory which should be protected
69366 from userspace allocation. Keeping a user from writing to low pages
69367 can help reduce the impact of kernel NULL pointer bugs.
69368 diff --git a/mm/filemap.c b/mm/filemap.c
69369 index 03c5b0e..a01e793 100644
69370 --- a/mm/filemap.c
69371 +++ b/mm/filemap.c
69372 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69373 struct address_space *mapping = file->f_mapping;
69374
69375 if (!mapping->a_ops->readpage)
69376 - return -ENOEXEC;
69377 + return -ENODEV;
69378 file_accessed(file);
69379 vma->vm_ops = &generic_file_vm_ops;
69380 vma->vm_flags |= VM_CAN_NONLINEAR;
69381 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69382 *pos = i_size_read(inode);
69383
69384 if (limit != RLIM_INFINITY) {
69385 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69386 if (*pos >= limit) {
69387 send_sig(SIGXFSZ, current, 0);
69388 return -EFBIG;
69389 diff --git a/mm/fremap.c b/mm/fremap.c
69390 index 9ed4fd4..c42648d 100644
69391 --- a/mm/fremap.c
69392 +++ b/mm/fremap.c
69393 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69394 retry:
69395 vma = find_vma(mm, start);
69396
69397 +#ifdef CONFIG_PAX_SEGMEXEC
69398 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69399 + goto out;
69400 +#endif
69401 +
69402 /*
69403 * Make sure the vma is shared, that it supports prefaulting,
69404 * and that the remapped range is valid and fully within
69405 diff --git a/mm/highmem.c b/mm/highmem.c
69406 index 57d82c6..e9e0552 100644
69407 --- a/mm/highmem.c
69408 +++ b/mm/highmem.c
69409 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69410 * So no dangers, even with speculative execution.
69411 */
69412 page = pte_page(pkmap_page_table[i]);
69413 + pax_open_kernel();
69414 pte_clear(&init_mm, (unsigned long)page_address(page),
69415 &pkmap_page_table[i]);
69416 -
69417 + pax_close_kernel();
69418 set_page_address(page, NULL);
69419 need_flush = 1;
69420 }
69421 @@ -186,9 +187,11 @@ start:
69422 }
69423 }
69424 vaddr = PKMAP_ADDR(last_pkmap_nr);
69425 +
69426 + pax_open_kernel();
69427 set_pte_at(&init_mm, vaddr,
69428 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69429 -
69430 + pax_close_kernel();
69431 pkmap_count[last_pkmap_nr] = 1;
69432 set_page_address(page, (void *)vaddr);
69433
69434 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69435 index 8f005e9..1cb1036 100644
69436 --- a/mm/huge_memory.c
69437 +++ b/mm/huge_memory.c
69438 @@ -704,7 +704,7 @@ out:
69439 * run pte_offset_map on the pmd, if an huge pmd could
69440 * materialize from under us from a different thread.
69441 */
69442 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69443 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69444 return VM_FAULT_OOM;
69445 /* if an huge pmd materialized from under us just retry later */
69446 if (unlikely(pmd_trans_huge(*pmd)))
69447 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69448 index 2316840..b418671 100644
69449 --- a/mm/hugetlb.c
69450 +++ b/mm/hugetlb.c
69451 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69452 return 1;
69453 }
69454
69455 +#ifdef CONFIG_PAX_SEGMEXEC
69456 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69457 +{
69458 + struct mm_struct *mm = vma->vm_mm;
69459 + struct vm_area_struct *vma_m;
69460 + unsigned long address_m;
69461 + pte_t *ptep_m;
69462 +
69463 + vma_m = pax_find_mirror_vma(vma);
69464 + if (!vma_m)
69465 + return;
69466 +
69467 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69468 + address_m = address + SEGMEXEC_TASK_SIZE;
69469 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69470 + get_page(page_m);
69471 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69472 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69473 +}
69474 +#endif
69475 +
69476 /*
69477 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69478 */
69479 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
69480 make_huge_pte(vma, new_page, 1));
69481 page_remove_rmap(old_page);
69482 hugepage_add_new_anon_rmap(new_page, vma, address);
69483 +
69484 +#ifdef CONFIG_PAX_SEGMEXEC
69485 + pax_mirror_huge_pte(vma, address, new_page);
69486 +#endif
69487 +
69488 /* Make the old page be freed below */
69489 new_page = old_page;
69490 mmu_notifier_invalidate_range_end(mm,
69491 @@ -2601,6 +2627,10 @@ retry:
69492 && (vma->vm_flags & VM_SHARED)));
69493 set_huge_pte_at(mm, address, ptep, new_pte);
69494
69495 +#ifdef CONFIG_PAX_SEGMEXEC
69496 + pax_mirror_huge_pte(vma, address, page);
69497 +#endif
69498 +
69499 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69500 /* Optimization, do the COW without a second fault */
69501 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69502 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69503 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69504 struct hstate *h = hstate_vma(vma);
69505
69506 +#ifdef CONFIG_PAX_SEGMEXEC
69507 + struct vm_area_struct *vma_m;
69508 +#endif
69509 +
69510 ptep = huge_pte_offset(mm, address);
69511 if (ptep) {
69512 entry = huge_ptep_get(ptep);
69513 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69514 VM_FAULT_SET_HINDEX(h - hstates);
69515 }
69516
69517 +#ifdef CONFIG_PAX_SEGMEXEC
69518 + vma_m = pax_find_mirror_vma(vma);
69519 + if (vma_m) {
69520 + unsigned long address_m;
69521 +
69522 + if (vma->vm_start > vma_m->vm_start) {
69523 + address_m = address;
69524 + address -= SEGMEXEC_TASK_SIZE;
69525 + vma = vma_m;
69526 + h = hstate_vma(vma);
69527 + } else
69528 + address_m = address + SEGMEXEC_TASK_SIZE;
69529 +
69530 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69531 + return VM_FAULT_OOM;
69532 + address_m &= HPAGE_MASK;
69533 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69534 + }
69535 +#endif
69536 +
69537 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69538 if (!ptep)
69539 return VM_FAULT_OOM;
69540 diff --git a/mm/internal.h b/mm/internal.h
69541 index 2189af4..f2ca332 100644
69542 --- a/mm/internal.h
69543 +++ b/mm/internal.h
69544 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69545 * in mm/page_alloc.c
69546 */
69547 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69548 +extern void free_compound_page(struct page *page);
69549 extern void prep_compound_page(struct page *page, unsigned long order);
69550 #ifdef CONFIG_MEMORY_FAILURE
69551 extern bool is_free_buddy_page(struct page *page);
69552 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69553 index f3b2a00..61da94d 100644
69554 --- a/mm/kmemleak.c
69555 +++ b/mm/kmemleak.c
69556 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
69557
69558 for (i = 0; i < object->trace_len; i++) {
69559 void *ptr = (void *)object->trace[i];
69560 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69561 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69562 }
69563 }
69564
69565 diff --git a/mm/maccess.c b/mm/maccess.c
69566 index d53adf9..03a24bf 100644
69567 --- a/mm/maccess.c
69568 +++ b/mm/maccess.c
69569 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69570 set_fs(KERNEL_DS);
69571 pagefault_disable();
69572 ret = __copy_from_user_inatomic(dst,
69573 - (__force const void __user *)src, size);
69574 + (const void __force_user *)src, size);
69575 pagefault_enable();
69576 set_fs(old_fs);
69577
69578 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69579
69580 set_fs(KERNEL_DS);
69581 pagefault_disable();
69582 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69583 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69584 pagefault_enable();
69585 set_fs(old_fs);
69586
69587 diff --git a/mm/madvise.c b/mm/madvise.c
69588 index 74bf193..feb6fd3 100644
69589 --- a/mm/madvise.c
69590 +++ b/mm/madvise.c
69591 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69592 pgoff_t pgoff;
69593 unsigned long new_flags = vma->vm_flags;
69594
69595 +#ifdef CONFIG_PAX_SEGMEXEC
69596 + struct vm_area_struct *vma_m;
69597 +#endif
69598 +
69599 switch (behavior) {
69600 case MADV_NORMAL:
69601 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69602 @@ -110,6 +114,13 @@ success:
69603 /*
69604 * vm_flags is protected by the mmap_sem held in write mode.
69605 */
69606 +
69607 +#ifdef CONFIG_PAX_SEGMEXEC
69608 + vma_m = pax_find_mirror_vma(vma);
69609 + if (vma_m)
69610 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69611 +#endif
69612 +
69613 vma->vm_flags = new_flags;
69614
69615 out:
69616 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69617 struct vm_area_struct ** prev,
69618 unsigned long start, unsigned long end)
69619 {
69620 +
69621 +#ifdef CONFIG_PAX_SEGMEXEC
69622 + struct vm_area_struct *vma_m;
69623 +#endif
69624 +
69625 *prev = vma;
69626 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69627 return -EINVAL;
69628 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69629 zap_page_range(vma, start, end - start, &details);
69630 } else
69631 zap_page_range(vma, start, end - start, NULL);
69632 +
69633 +#ifdef CONFIG_PAX_SEGMEXEC
69634 + vma_m = pax_find_mirror_vma(vma);
69635 + if (vma_m) {
69636 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69637 + struct zap_details details = {
69638 + .nonlinear_vma = vma_m,
69639 + .last_index = ULONG_MAX,
69640 + };
69641 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
69642 + } else
69643 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
69644 + }
69645 +#endif
69646 +
69647 return 0;
69648 }
69649
69650 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
69651 if (end < start)
69652 goto out;
69653
69654 +#ifdef CONFIG_PAX_SEGMEXEC
69655 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
69656 + if (end > SEGMEXEC_TASK_SIZE)
69657 + goto out;
69658 + } else
69659 +#endif
69660 +
69661 + if (end > TASK_SIZE)
69662 + goto out;
69663 +
69664 error = 0;
69665 if (end == start)
69666 goto out;
69667 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
69668 index 06d3479..0778eef 100644
69669 --- a/mm/memory-failure.c
69670 +++ b/mm/memory-failure.c
69671 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
69672
69673 int sysctl_memory_failure_recovery __read_mostly = 1;
69674
69675 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69676 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
69677
69678 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
69679
69680 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
69681 si.si_signo = SIGBUS;
69682 si.si_errno = 0;
69683 si.si_code = BUS_MCEERR_AO;
69684 - si.si_addr = (void *)addr;
69685 + si.si_addr = (void __user *)addr;
69686 #ifdef __ARCH_SI_TRAPNO
69687 si.si_trapno = trapno;
69688 #endif
69689 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69690 }
69691
69692 nr_pages = 1 << compound_trans_order(hpage);
69693 - atomic_long_add(nr_pages, &mce_bad_pages);
69694 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
69695
69696 /*
69697 * We need/can do nothing about count=0 pages.
69698 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69699 if (!PageHWPoison(hpage)
69700 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
69701 || (p != hpage && TestSetPageHWPoison(hpage))) {
69702 - atomic_long_sub(nr_pages, &mce_bad_pages);
69703 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69704 return 0;
69705 }
69706 set_page_hwpoison_huge_page(hpage);
69707 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
69708 }
69709 if (hwpoison_filter(p)) {
69710 if (TestClearPageHWPoison(p))
69711 - atomic_long_sub(nr_pages, &mce_bad_pages);
69712 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69713 unlock_page(hpage);
69714 put_page(hpage);
69715 return 0;
69716 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
69717 return 0;
69718 }
69719 if (TestClearPageHWPoison(p))
69720 - atomic_long_sub(nr_pages, &mce_bad_pages);
69721 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69722 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
69723 return 0;
69724 }
69725 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
69726 */
69727 if (TestClearPageHWPoison(page)) {
69728 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
69729 - atomic_long_sub(nr_pages, &mce_bad_pages);
69730 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
69731 freeit = 1;
69732 if (PageHuge(page))
69733 clear_page_hwpoison_huge_page(page);
69734 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
69735 }
69736 done:
69737 if (!PageHWPoison(hpage))
69738 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
69739 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
69740 set_page_hwpoison_huge_page(hpage);
69741 dequeue_hwpoisoned_huge_page(hpage);
69742 /* keep elevated page count for bad page */
69743 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
69744 return ret;
69745
69746 done:
69747 - atomic_long_add(1, &mce_bad_pages);
69748 + atomic_long_add_unchecked(1, &mce_bad_pages);
69749 SetPageHWPoison(page);
69750 /* keep elevated page count for bad page */
69751 return ret;
69752 diff --git a/mm/memory.c b/mm/memory.c
69753 index 829d437..3d3926a 100644
69754 --- a/mm/memory.c
69755 +++ b/mm/memory.c
69756 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
69757 return;
69758
69759 pmd = pmd_offset(pud, start);
69760 +
69761 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
69762 pud_clear(pud);
69763 pmd_free_tlb(tlb, pmd, start);
69764 +#endif
69765 +
69766 }
69767
69768 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69769 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
69770 if (end - 1 > ceiling - 1)
69771 return;
69772
69773 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
69774 pud = pud_offset(pgd, start);
69775 pgd_clear(pgd);
69776 pud_free_tlb(tlb, pud, start);
69777 +#endif
69778 +
69779 }
69780
69781 /*
69782 @@ -1566,12 +1573,6 @@ no_page_table:
69783 return page;
69784 }
69785
69786 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
69787 -{
69788 - return stack_guard_page_start(vma, addr) ||
69789 - stack_guard_page_end(vma, addr+PAGE_SIZE);
69790 -}
69791 -
69792 /**
69793 * __get_user_pages() - pin user pages in memory
69794 * @tsk: task_struct of target task
69795 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69796 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
69797 i = 0;
69798
69799 - do {
69800 + while (nr_pages) {
69801 struct vm_area_struct *vma;
69802
69803 - vma = find_extend_vma(mm, start);
69804 + vma = find_vma(mm, start);
69805 if (!vma && in_gate_area(mm, start)) {
69806 unsigned long pg = start & PAGE_MASK;
69807 pgd_t *pgd;
69808 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69809 goto next_page;
69810 }
69811
69812 - if (!vma ||
69813 + if (!vma || start < vma->vm_start ||
69814 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
69815 !(vm_flags & vma->vm_flags))
69816 return i ? : -EFAULT;
69817 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
69818 int ret;
69819 unsigned int fault_flags = 0;
69820
69821 - /* For mlock, just skip the stack guard page. */
69822 - if (foll_flags & FOLL_MLOCK) {
69823 - if (stack_guard_page(vma, start))
69824 - goto next_page;
69825 - }
69826 if (foll_flags & FOLL_WRITE)
69827 fault_flags |= FAULT_FLAG_WRITE;
69828 if (nonblocking)
69829 @@ -1800,7 +1796,7 @@ next_page:
69830 start += PAGE_SIZE;
69831 nr_pages--;
69832 } while (nr_pages && start < vma->vm_end);
69833 - } while (nr_pages);
69834 + }
69835 return i;
69836 }
69837 EXPORT_SYMBOL(__get_user_pages);
69838 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
69839 page_add_file_rmap(page);
69840 set_pte_at(mm, addr, pte, mk_pte(page, prot));
69841
69842 +#ifdef CONFIG_PAX_SEGMEXEC
69843 + pax_mirror_file_pte(vma, addr, page, ptl);
69844 +#endif
69845 +
69846 retval = 0;
69847 pte_unmap_unlock(pte, ptl);
69848 return retval;
69849 @@ -2041,10 +2041,22 @@ out:
69850 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
69851 struct page *page)
69852 {
69853 +
69854 +#ifdef CONFIG_PAX_SEGMEXEC
69855 + struct vm_area_struct *vma_m;
69856 +#endif
69857 +
69858 if (addr < vma->vm_start || addr >= vma->vm_end)
69859 return -EFAULT;
69860 if (!page_count(page))
69861 return -EINVAL;
69862 +
69863 +#ifdef CONFIG_PAX_SEGMEXEC
69864 + vma_m = pax_find_mirror_vma(vma);
69865 + if (vma_m)
69866 + vma_m->vm_flags |= VM_INSERTPAGE;
69867 +#endif
69868 +
69869 vma->vm_flags |= VM_INSERTPAGE;
69870 return insert_page(vma, addr, page, vma->vm_page_prot);
69871 }
69872 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
69873 unsigned long pfn)
69874 {
69875 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
69876 + BUG_ON(vma->vm_mirror);
69877
69878 if (addr < vma->vm_start || addr >= vma->vm_end)
69879 return -EFAULT;
69880 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
69881 copy_user_highpage(dst, src, va, vma);
69882 }
69883
69884 +#ifdef CONFIG_PAX_SEGMEXEC
69885 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
69886 +{
69887 + struct mm_struct *mm = vma->vm_mm;
69888 + spinlock_t *ptl;
69889 + pte_t *pte, entry;
69890 +
69891 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
69892 + entry = *pte;
69893 + if (!pte_present(entry)) {
69894 + if (!pte_none(entry)) {
69895 + BUG_ON(pte_file(entry));
69896 + free_swap_and_cache(pte_to_swp_entry(entry));
69897 + pte_clear_not_present_full(mm, address, pte, 0);
69898 + }
69899 + } else {
69900 + struct page *page;
69901 +
69902 + flush_cache_page(vma, address, pte_pfn(entry));
69903 + entry = ptep_clear_flush(vma, address, pte);
69904 + BUG_ON(pte_dirty(entry));
69905 + page = vm_normal_page(vma, address, entry);
69906 + if (page) {
69907 + update_hiwater_rss(mm);
69908 + if (PageAnon(page))
69909 + dec_mm_counter_fast(mm, MM_ANONPAGES);
69910 + else
69911 + dec_mm_counter_fast(mm, MM_FILEPAGES);
69912 + page_remove_rmap(page);
69913 + page_cache_release(page);
69914 + }
69915 + }
69916 + pte_unmap_unlock(pte, ptl);
69917 +}
69918 +
69919 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
69920 + *
69921 + * the ptl of the lower mapped page is held on entry and is not released on exit
69922 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
69923 + */
69924 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69925 +{
69926 + struct mm_struct *mm = vma->vm_mm;
69927 + unsigned long address_m;
69928 + spinlock_t *ptl_m;
69929 + struct vm_area_struct *vma_m;
69930 + pmd_t *pmd_m;
69931 + pte_t *pte_m, entry_m;
69932 +
69933 + BUG_ON(!page_m || !PageAnon(page_m));
69934 +
69935 + vma_m = pax_find_mirror_vma(vma);
69936 + if (!vma_m)
69937 + return;
69938 +
69939 + BUG_ON(!PageLocked(page_m));
69940 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69941 + address_m = address + SEGMEXEC_TASK_SIZE;
69942 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69943 + pte_m = pte_offset_map(pmd_m, address_m);
69944 + ptl_m = pte_lockptr(mm, pmd_m);
69945 + if (ptl != ptl_m) {
69946 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69947 + if (!pte_none(*pte_m))
69948 + goto out;
69949 + }
69950 +
69951 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69952 + page_cache_get(page_m);
69953 + page_add_anon_rmap(page_m, vma_m, address_m);
69954 + inc_mm_counter_fast(mm, MM_ANONPAGES);
69955 + set_pte_at(mm, address_m, pte_m, entry_m);
69956 + update_mmu_cache(vma_m, address_m, entry_m);
69957 +out:
69958 + if (ptl != ptl_m)
69959 + spin_unlock(ptl_m);
69960 + pte_unmap(pte_m);
69961 + unlock_page(page_m);
69962 +}
69963 +
69964 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
69965 +{
69966 + struct mm_struct *mm = vma->vm_mm;
69967 + unsigned long address_m;
69968 + spinlock_t *ptl_m;
69969 + struct vm_area_struct *vma_m;
69970 + pmd_t *pmd_m;
69971 + pte_t *pte_m, entry_m;
69972 +
69973 + BUG_ON(!page_m || PageAnon(page_m));
69974 +
69975 + vma_m = pax_find_mirror_vma(vma);
69976 + if (!vma_m)
69977 + return;
69978 +
69979 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69980 + address_m = address + SEGMEXEC_TASK_SIZE;
69981 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
69982 + pte_m = pte_offset_map(pmd_m, address_m);
69983 + ptl_m = pte_lockptr(mm, pmd_m);
69984 + if (ptl != ptl_m) {
69985 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
69986 + if (!pte_none(*pte_m))
69987 + goto out;
69988 + }
69989 +
69990 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
69991 + page_cache_get(page_m);
69992 + page_add_file_rmap(page_m);
69993 + inc_mm_counter_fast(mm, MM_FILEPAGES);
69994 + set_pte_at(mm, address_m, pte_m, entry_m);
69995 + update_mmu_cache(vma_m, address_m, entry_m);
69996 +out:
69997 + if (ptl != ptl_m)
69998 + spin_unlock(ptl_m);
69999 + pte_unmap(pte_m);
70000 +}
70001 +
70002 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70003 +{
70004 + struct mm_struct *mm = vma->vm_mm;
70005 + unsigned long address_m;
70006 + spinlock_t *ptl_m;
70007 + struct vm_area_struct *vma_m;
70008 + pmd_t *pmd_m;
70009 + pte_t *pte_m, entry_m;
70010 +
70011 + vma_m = pax_find_mirror_vma(vma);
70012 + if (!vma_m)
70013 + return;
70014 +
70015 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70016 + address_m = address + SEGMEXEC_TASK_SIZE;
70017 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70018 + pte_m = pte_offset_map(pmd_m, address_m);
70019 + ptl_m = pte_lockptr(mm, pmd_m);
70020 + if (ptl != ptl_m) {
70021 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70022 + if (!pte_none(*pte_m))
70023 + goto out;
70024 + }
70025 +
70026 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70027 + set_pte_at(mm, address_m, pte_m, entry_m);
70028 +out:
70029 + if (ptl != ptl_m)
70030 + spin_unlock(ptl_m);
70031 + pte_unmap(pte_m);
70032 +}
70033 +
70034 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70035 +{
70036 + struct page *page_m;
70037 + pte_t entry;
70038 +
70039 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70040 + goto out;
70041 +
70042 + entry = *pte;
70043 + page_m = vm_normal_page(vma, address, entry);
70044 + if (!page_m)
70045 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70046 + else if (PageAnon(page_m)) {
70047 + if (pax_find_mirror_vma(vma)) {
70048 + pte_unmap_unlock(pte, ptl);
70049 + lock_page(page_m);
70050 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70051 + if (pte_same(entry, *pte))
70052 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70053 + else
70054 + unlock_page(page_m);
70055 + }
70056 + } else
70057 + pax_mirror_file_pte(vma, address, page_m, ptl);
70058 +
70059 +out:
70060 + pte_unmap_unlock(pte, ptl);
70061 +}
70062 +#endif
70063 +
70064 /*
70065 * This routine handles present pages, when users try to write
70066 * to a shared page. It is done by copying the page to a new address
70067 @@ -2656,6 +2849,12 @@ gotten:
70068 */
70069 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70070 if (likely(pte_same(*page_table, orig_pte))) {
70071 +
70072 +#ifdef CONFIG_PAX_SEGMEXEC
70073 + if (pax_find_mirror_vma(vma))
70074 + BUG_ON(!trylock_page(new_page));
70075 +#endif
70076 +
70077 if (old_page) {
70078 if (!PageAnon(old_page)) {
70079 dec_mm_counter_fast(mm, MM_FILEPAGES);
70080 @@ -2707,6 +2906,10 @@ gotten:
70081 page_remove_rmap(old_page);
70082 }
70083
70084 +#ifdef CONFIG_PAX_SEGMEXEC
70085 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70086 +#endif
70087 +
70088 /* Free the old page.. */
70089 new_page = old_page;
70090 ret |= VM_FAULT_WRITE;
70091 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70092 swap_free(entry);
70093 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70094 try_to_free_swap(page);
70095 +
70096 +#ifdef CONFIG_PAX_SEGMEXEC
70097 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70098 +#endif
70099 +
70100 unlock_page(page);
70101 if (swapcache) {
70102 /*
70103 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70104
70105 /* No need to invalidate - it was non-present before */
70106 update_mmu_cache(vma, address, page_table);
70107 +
70108 +#ifdef CONFIG_PAX_SEGMEXEC
70109 + pax_mirror_anon_pte(vma, address, page, ptl);
70110 +#endif
70111 +
70112 unlock:
70113 pte_unmap_unlock(page_table, ptl);
70114 out:
70115 @@ -3028,40 +3241,6 @@ out_release:
70116 }
70117
70118 /*
70119 - * This is like a special single-page "expand_{down|up}wards()",
70120 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70121 - * doesn't hit another vma.
70122 - */
70123 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70124 -{
70125 - address &= PAGE_MASK;
70126 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70127 - struct vm_area_struct *prev = vma->vm_prev;
70128 -
70129 - /*
70130 - * Is there a mapping abutting this one below?
70131 - *
70132 - * That's only ok if it's the same stack mapping
70133 - * that has gotten split..
70134 - */
70135 - if (prev && prev->vm_end == address)
70136 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70137 -
70138 - expand_downwards(vma, address - PAGE_SIZE);
70139 - }
70140 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70141 - struct vm_area_struct *next = vma->vm_next;
70142 -
70143 - /* As VM_GROWSDOWN but s/below/above/ */
70144 - if (next && next->vm_start == address + PAGE_SIZE)
70145 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70146 -
70147 - expand_upwards(vma, address + PAGE_SIZE);
70148 - }
70149 - return 0;
70150 -}
70151 -
70152 -/*
70153 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70154 * but allow concurrent faults), and pte mapped but not yet locked.
70155 * We return with mmap_sem still held, but pte unmapped and unlocked.
70156 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70157 unsigned long address, pte_t *page_table, pmd_t *pmd,
70158 unsigned int flags)
70159 {
70160 - struct page *page;
70161 + struct page *page = NULL;
70162 spinlock_t *ptl;
70163 pte_t entry;
70164
70165 - pte_unmap(page_table);
70166 -
70167 - /* Check if we need to add a guard page to the stack */
70168 - if (check_stack_guard_page(vma, address) < 0)
70169 - return VM_FAULT_SIGBUS;
70170 -
70171 - /* Use the zero-page for reads */
70172 if (!(flags & FAULT_FLAG_WRITE)) {
70173 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70174 vma->vm_page_prot));
70175 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70176 + ptl = pte_lockptr(mm, pmd);
70177 + spin_lock(ptl);
70178 if (!pte_none(*page_table))
70179 goto unlock;
70180 goto setpte;
70181 }
70182
70183 /* Allocate our own private page. */
70184 + pte_unmap(page_table);
70185 +
70186 if (unlikely(anon_vma_prepare(vma)))
70187 goto oom;
70188 page = alloc_zeroed_user_highpage_movable(vma, address);
70189 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70190 if (!pte_none(*page_table))
70191 goto release;
70192
70193 +#ifdef CONFIG_PAX_SEGMEXEC
70194 + if (pax_find_mirror_vma(vma))
70195 + BUG_ON(!trylock_page(page));
70196 +#endif
70197 +
70198 inc_mm_counter_fast(mm, MM_ANONPAGES);
70199 page_add_new_anon_rmap(page, vma, address);
70200 setpte:
70201 @@ -3116,6 +3296,12 @@ setpte:
70202
70203 /* No need to invalidate - it was non-present before */
70204 update_mmu_cache(vma, address, page_table);
70205 +
70206 +#ifdef CONFIG_PAX_SEGMEXEC
70207 + if (page)
70208 + pax_mirror_anon_pte(vma, address, page, ptl);
70209 +#endif
70210 +
70211 unlock:
70212 pte_unmap_unlock(page_table, ptl);
70213 return 0;
70214 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70215 */
70216 /* Only go through if we didn't race with anybody else... */
70217 if (likely(pte_same(*page_table, orig_pte))) {
70218 +
70219 +#ifdef CONFIG_PAX_SEGMEXEC
70220 + if (anon && pax_find_mirror_vma(vma))
70221 + BUG_ON(!trylock_page(page));
70222 +#endif
70223 +
70224 flush_icache_page(vma, page);
70225 entry = mk_pte(page, vma->vm_page_prot);
70226 if (flags & FAULT_FLAG_WRITE)
70227 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70228
70229 /* no need to invalidate: a not-present page won't be cached */
70230 update_mmu_cache(vma, address, page_table);
70231 +
70232 +#ifdef CONFIG_PAX_SEGMEXEC
70233 + if (anon)
70234 + pax_mirror_anon_pte(vma, address, page, ptl);
70235 + else
70236 + pax_mirror_file_pte(vma, address, page, ptl);
70237 +#endif
70238 +
70239 } else {
70240 if (cow_page)
70241 mem_cgroup_uncharge_page(cow_page);
70242 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
70243 if (flags & FAULT_FLAG_WRITE)
70244 flush_tlb_fix_spurious_fault(vma, address);
70245 }
70246 +
70247 +#ifdef CONFIG_PAX_SEGMEXEC
70248 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70249 + return 0;
70250 +#endif
70251 +
70252 unlock:
70253 pte_unmap_unlock(pte, ptl);
70254 return 0;
70255 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70256 pmd_t *pmd;
70257 pte_t *pte;
70258
70259 +#ifdef CONFIG_PAX_SEGMEXEC
70260 + struct vm_area_struct *vma_m;
70261 +#endif
70262 +
70263 __set_current_state(TASK_RUNNING);
70264
70265 count_vm_event(PGFAULT);
70266 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70267 if (unlikely(is_vm_hugetlb_page(vma)))
70268 return hugetlb_fault(mm, vma, address, flags);
70269
70270 +#ifdef CONFIG_PAX_SEGMEXEC
70271 + vma_m = pax_find_mirror_vma(vma);
70272 + if (vma_m) {
70273 + unsigned long address_m;
70274 + pgd_t *pgd_m;
70275 + pud_t *pud_m;
70276 + pmd_t *pmd_m;
70277 +
70278 + if (vma->vm_start > vma_m->vm_start) {
70279 + address_m = address;
70280 + address -= SEGMEXEC_TASK_SIZE;
70281 + vma = vma_m;
70282 + } else
70283 + address_m = address + SEGMEXEC_TASK_SIZE;
70284 +
70285 + pgd_m = pgd_offset(mm, address_m);
70286 + pud_m = pud_alloc(mm, pgd_m, address_m);
70287 + if (!pud_m)
70288 + return VM_FAULT_OOM;
70289 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70290 + if (!pmd_m)
70291 + return VM_FAULT_OOM;
70292 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70293 + return VM_FAULT_OOM;
70294 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70295 + }
70296 +#endif
70297 +
70298 pgd = pgd_offset(mm, address);
70299 pud = pud_alloc(mm, pgd, address);
70300 if (!pud)
70301 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70302 * run pte_offset_map on the pmd, if an huge pmd could
70303 * materialize from under us from a different thread.
70304 */
70305 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70306 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70307 return VM_FAULT_OOM;
70308 /* if an huge pmd materialized from under us just retry later */
70309 if (unlikely(pmd_trans_huge(*pmd)))
70310 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
70311 gate_vma.vm_start = FIXADDR_USER_START;
70312 gate_vma.vm_end = FIXADDR_USER_END;
70313 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70314 - gate_vma.vm_page_prot = __P101;
70315 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70316 /*
70317 * Make sure the vDSO gets into every core dump.
70318 * Dumping its contents makes post-mortem fully interpretable later
70319 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70320 index c3fdbcb..2e8ef90 100644
70321 --- a/mm/mempolicy.c
70322 +++ b/mm/mempolicy.c
70323 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70324 unsigned long vmstart;
70325 unsigned long vmend;
70326
70327 +#ifdef CONFIG_PAX_SEGMEXEC
70328 + struct vm_area_struct *vma_m;
70329 +#endif
70330 +
70331 vma = find_vma_prev(mm, start, &prev);
70332 if (!vma || vma->vm_start > start)
70333 return -EFAULT;
70334 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70335 err = policy_vma(vma, new_pol);
70336 if (err)
70337 goto out;
70338 +
70339 +#ifdef CONFIG_PAX_SEGMEXEC
70340 + vma_m = pax_find_mirror_vma(vma);
70341 + if (vma_m) {
70342 + err = policy_vma(vma_m, new_pol);
70343 + if (err)
70344 + goto out;
70345 + }
70346 +#endif
70347 +
70348 }
70349
70350 out:
70351 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70352
70353 if (end < start)
70354 return -EINVAL;
70355 +
70356 +#ifdef CONFIG_PAX_SEGMEXEC
70357 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70358 + if (end > SEGMEXEC_TASK_SIZE)
70359 + return -EINVAL;
70360 + } else
70361 +#endif
70362 +
70363 + if (end > TASK_SIZE)
70364 + return -EINVAL;
70365 +
70366 if (end == start)
70367 return 0;
70368
70369 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70370 if (!mm)
70371 goto out;
70372
70373 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70374 + if (mm != current->mm &&
70375 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70376 + err = -EPERM;
70377 + goto out;
70378 + }
70379 +#endif
70380 +
70381 /*
70382 * Check if this process has the right to modify the specified
70383 * process. The right exists if the process has administrative
70384 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70385 rcu_read_lock();
70386 tcred = __task_cred(task);
70387 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70388 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70389 - !capable(CAP_SYS_NICE)) {
70390 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70391 rcu_read_unlock();
70392 err = -EPERM;
70393 goto out;
70394 diff --git a/mm/migrate.c b/mm/migrate.c
70395 index 177aca4..ab3a744 100644
70396 --- a/mm/migrate.c
70397 +++ b/mm/migrate.c
70398 @@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70399 if (!mm)
70400 return -EINVAL;
70401
70402 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70403 + if (mm != current->mm &&
70404 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70405 + err = -EPERM;
70406 + goto out;
70407 + }
70408 +#endif
70409 +
70410 /*
70411 * Check if this process has the right to modify the specified
70412 * process. The right exists if the process has administrative
70413 @@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70414 rcu_read_lock();
70415 tcred = __task_cred(task);
70416 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70417 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70418 - !capable(CAP_SYS_NICE)) {
70419 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70420 rcu_read_unlock();
70421 err = -EPERM;
70422 goto out;
70423 diff --git a/mm/mlock.c b/mm/mlock.c
70424 index 4f4f53b..9511904 100644
70425 --- a/mm/mlock.c
70426 +++ b/mm/mlock.c
70427 @@ -13,6 +13,7 @@
70428 #include <linux/pagemap.h>
70429 #include <linux/mempolicy.h>
70430 #include <linux/syscalls.h>
70431 +#include <linux/security.h>
70432 #include <linux/sched.h>
70433 #include <linux/export.h>
70434 #include <linux/rmap.h>
70435 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70436 return -EINVAL;
70437 if (end == start)
70438 return 0;
70439 + if (end > TASK_SIZE)
70440 + return -EINVAL;
70441 +
70442 vma = find_vma_prev(current->mm, start, &prev);
70443 if (!vma || vma->vm_start > start)
70444 return -ENOMEM;
70445 @@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70446 for (nstart = start ; ; ) {
70447 vm_flags_t newflags;
70448
70449 +#ifdef CONFIG_PAX_SEGMEXEC
70450 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70451 + break;
70452 +#endif
70453 +
70454 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70455
70456 newflags = vma->vm_flags | VM_LOCKED;
70457 @@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70458 lock_limit >>= PAGE_SHIFT;
70459
70460 /* check against resource limits */
70461 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70462 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70463 error = do_mlock(start, len, 1);
70464 up_write(&current->mm->mmap_sem);
70465 @@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70466 static int do_mlockall(int flags)
70467 {
70468 struct vm_area_struct * vma, * prev = NULL;
70469 - unsigned int def_flags = 0;
70470
70471 if (flags & MCL_FUTURE)
70472 - def_flags = VM_LOCKED;
70473 - current->mm->def_flags = def_flags;
70474 + current->mm->def_flags |= VM_LOCKED;
70475 + else
70476 + current->mm->def_flags &= ~VM_LOCKED;
70477 if (flags == MCL_FUTURE)
70478 goto out;
70479
70480 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70481 vm_flags_t newflags;
70482
70483 +#ifdef CONFIG_PAX_SEGMEXEC
70484 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70485 + break;
70486 +#endif
70487 +
70488 + BUG_ON(vma->vm_end > TASK_SIZE);
70489 newflags = vma->vm_flags | VM_LOCKED;
70490 if (!(flags & MCL_CURRENT))
70491 newflags &= ~VM_LOCKED;
70492 @@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70493 lock_limit >>= PAGE_SHIFT;
70494
70495 ret = -ENOMEM;
70496 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70497 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70498 capable(CAP_IPC_LOCK))
70499 ret = do_mlockall(flags);
70500 diff --git a/mm/mmap.c b/mm/mmap.c
70501 index eae90af..c930262 100644
70502 --- a/mm/mmap.c
70503 +++ b/mm/mmap.c
70504 @@ -46,6 +46,16 @@
70505 #define arch_rebalance_pgtables(addr, len) (addr)
70506 #endif
70507
70508 +static inline void verify_mm_writelocked(struct mm_struct *mm)
70509 +{
70510 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70511 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70512 + up_read(&mm->mmap_sem);
70513 + BUG();
70514 + }
70515 +#endif
70516 +}
70517 +
70518 static void unmap_region(struct mm_struct *mm,
70519 struct vm_area_struct *vma, struct vm_area_struct *prev,
70520 unsigned long start, unsigned long end);
70521 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70522 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70523 *
70524 */
70525 -pgprot_t protection_map[16] = {
70526 +pgprot_t protection_map[16] __read_only = {
70527 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70528 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70529 };
70530
70531 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
70532 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70533 {
70534 - return __pgprot(pgprot_val(protection_map[vm_flags &
70535 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70536 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70537 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70538 +
70539 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70540 + if (!(__supported_pte_mask & _PAGE_NX) &&
70541 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70542 + (vm_flags & (VM_READ | VM_WRITE)))
70543 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70544 +#endif
70545 +
70546 + return prot;
70547 }
70548 EXPORT_SYMBOL(vm_get_page_prot);
70549
70550 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70551 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70552 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70553 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70554 /*
70555 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70556 * other variables. It can be updated by several CPUs frequently.
70557 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70558 struct vm_area_struct *next = vma->vm_next;
70559
70560 might_sleep();
70561 + BUG_ON(vma->vm_mirror);
70562 if (vma->vm_ops && vma->vm_ops->close)
70563 vma->vm_ops->close(vma);
70564 if (vma->vm_file) {
70565 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70566 * not page aligned -Ram Gupta
70567 */
70568 rlim = rlimit(RLIMIT_DATA);
70569 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70570 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70571 (mm->end_data - mm->start_data) > rlim)
70572 goto out;
70573 @@ -689,6 +711,12 @@ static int
70574 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70575 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70576 {
70577 +
70578 +#ifdef CONFIG_PAX_SEGMEXEC
70579 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70580 + return 0;
70581 +#endif
70582 +
70583 if (is_mergeable_vma(vma, file, vm_flags) &&
70584 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70585 if (vma->vm_pgoff == vm_pgoff)
70586 @@ -708,6 +736,12 @@ static int
70587 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70588 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70589 {
70590 +
70591 +#ifdef CONFIG_PAX_SEGMEXEC
70592 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70593 + return 0;
70594 +#endif
70595 +
70596 if (is_mergeable_vma(vma, file, vm_flags) &&
70597 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70598 pgoff_t vm_pglen;
70599 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70600 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70601 struct vm_area_struct *prev, unsigned long addr,
70602 unsigned long end, unsigned long vm_flags,
70603 - struct anon_vma *anon_vma, struct file *file,
70604 + struct anon_vma *anon_vma, struct file *file,
70605 pgoff_t pgoff, struct mempolicy *policy)
70606 {
70607 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70608 struct vm_area_struct *area, *next;
70609 int err;
70610
70611 +#ifdef CONFIG_PAX_SEGMEXEC
70612 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70613 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70614 +
70615 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70616 +#endif
70617 +
70618 /*
70619 * We later require that vma->vm_flags == vm_flags,
70620 * so this tests vma->vm_flags & VM_SPECIAL, too.
70621 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70622 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70623 next = next->vm_next;
70624
70625 +#ifdef CONFIG_PAX_SEGMEXEC
70626 + if (prev)
70627 + prev_m = pax_find_mirror_vma(prev);
70628 + if (area)
70629 + area_m = pax_find_mirror_vma(area);
70630 + if (next)
70631 + next_m = pax_find_mirror_vma(next);
70632 +#endif
70633 +
70634 /*
70635 * Can it merge with the predecessor?
70636 */
70637 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70638 /* cases 1, 6 */
70639 err = vma_adjust(prev, prev->vm_start,
70640 next->vm_end, prev->vm_pgoff, NULL);
70641 - } else /* cases 2, 5, 7 */
70642 +
70643 +#ifdef CONFIG_PAX_SEGMEXEC
70644 + if (!err && prev_m)
70645 + err = vma_adjust(prev_m, prev_m->vm_start,
70646 + next_m->vm_end, prev_m->vm_pgoff, NULL);
70647 +#endif
70648 +
70649 + } else { /* cases 2, 5, 7 */
70650 err = vma_adjust(prev, prev->vm_start,
70651 end, prev->vm_pgoff, NULL);
70652 +
70653 +#ifdef CONFIG_PAX_SEGMEXEC
70654 + if (!err && prev_m)
70655 + err = vma_adjust(prev_m, prev_m->vm_start,
70656 + end_m, prev_m->vm_pgoff, NULL);
70657 +#endif
70658 +
70659 + }
70660 if (err)
70661 return NULL;
70662 khugepaged_enter_vma_merge(prev);
70663 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70664 mpol_equal(policy, vma_policy(next)) &&
70665 can_vma_merge_before(next, vm_flags,
70666 anon_vma, file, pgoff+pglen)) {
70667 - if (prev && addr < prev->vm_end) /* case 4 */
70668 + if (prev && addr < prev->vm_end) { /* case 4 */
70669 err = vma_adjust(prev, prev->vm_start,
70670 addr, prev->vm_pgoff, NULL);
70671 - else /* cases 3, 8 */
70672 +
70673 +#ifdef CONFIG_PAX_SEGMEXEC
70674 + if (!err && prev_m)
70675 + err = vma_adjust(prev_m, prev_m->vm_start,
70676 + addr_m, prev_m->vm_pgoff, NULL);
70677 +#endif
70678 +
70679 + } else { /* cases 3, 8 */
70680 err = vma_adjust(area, addr, next->vm_end,
70681 next->vm_pgoff - pglen, NULL);
70682 +
70683 +#ifdef CONFIG_PAX_SEGMEXEC
70684 + if (!err && area_m)
70685 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
70686 + next_m->vm_pgoff - pglen, NULL);
70687 +#endif
70688 +
70689 + }
70690 if (err)
70691 return NULL;
70692 khugepaged_enter_vma_merge(area);
70693 @@ -921,14 +1001,11 @@ none:
70694 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
70695 struct file *file, long pages)
70696 {
70697 - const unsigned long stack_flags
70698 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
70699 -
70700 if (file) {
70701 mm->shared_vm += pages;
70702 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
70703 mm->exec_vm += pages;
70704 - } else if (flags & stack_flags)
70705 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
70706 mm->stack_vm += pages;
70707 if (flags & (VM_RESERVED|VM_IO))
70708 mm->reserved_vm += pages;
70709 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70710 * (the exception is when the underlying filesystem is noexec
70711 * mounted, in which case we dont add PROT_EXEC.)
70712 */
70713 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70714 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70715 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
70716 prot |= PROT_EXEC;
70717
70718 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70719 /* Obtain the address to map to. we verify (or select) it and ensure
70720 * that it represents a valid section of the address space.
70721 */
70722 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
70723 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
70724 if (addr & ~PAGE_MASK)
70725 return addr;
70726
70727 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70728 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
70729 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
70730
70731 +#ifdef CONFIG_PAX_MPROTECT
70732 + if (mm->pax_flags & MF_PAX_MPROTECT) {
70733 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
70734 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
70735 + gr_log_rwxmmap(file);
70736 +
70737 +#ifdef CONFIG_PAX_EMUPLT
70738 + vm_flags &= ~VM_EXEC;
70739 +#else
70740 + return -EPERM;
70741 +#endif
70742 +
70743 + }
70744 +
70745 + if (!(vm_flags & VM_EXEC))
70746 + vm_flags &= ~VM_MAYEXEC;
70747 +#else
70748 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
70749 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
70750 +#endif
70751 + else
70752 + vm_flags &= ~VM_MAYWRITE;
70753 + }
70754 +#endif
70755 +
70756 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70757 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
70758 + vm_flags &= ~VM_PAGEEXEC;
70759 +#endif
70760 +
70761 if (flags & MAP_LOCKED)
70762 if (!can_do_mlock())
70763 return -EPERM;
70764 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70765 locked += mm->locked_vm;
70766 lock_limit = rlimit(RLIMIT_MEMLOCK);
70767 lock_limit >>= PAGE_SHIFT;
70768 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
70769 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
70770 return -EAGAIN;
70771 }
70772 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70773 if (error)
70774 return error;
70775
70776 + if (!gr_acl_handle_mmap(file, prot))
70777 + return -EACCES;
70778 +
70779 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
70780 }
70781 EXPORT_SYMBOL(do_mmap_pgoff);
70782 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
70783 vm_flags_t vm_flags = vma->vm_flags;
70784
70785 /* If it was private or non-writable, the write bit is already clear */
70786 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
70787 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
70788 return 0;
70789
70790 /* The backer wishes to know when pages are first written to? */
70791 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
70792 unsigned long charged = 0;
70793 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
70794
70795 +#ifdef CONFIG_PAX_SEGMEXEC
70796 + struct vm_area_struct *vma_m = NULL;
70797 +#endif
70798 +
70799 + /*
70800 + * mm->mmap_sem is required to protect against another thread
70801 + * changing the mappings in case we sleep.
70802 + */
70803 + verify_mm_writelocked(mm);
70804 +
70805 /* Clear old maps */
70806 error = -ENOMEM;
70807 -munmap_back:
70808 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70809 if (vma && vma->vm_start < addr + len) {
70810 if (do_munmap(mm, addr, len))
70811 return -ENOMEM;
70812 - goto munmap_back;
70813 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
70814 + BUG_ON(vma && vma->vm_start < addr + len);
70815 }
70816
70817 /* Check against address space limit. */
70818 @@ -1258,6 +1379,16 @@ munmap_back:
70819 goto unacct_error;
70820 }
70821
70822 +#ifdef CONFIG_PAX_SEGMEXEC
70823 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
70824 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70825 + if (!vma_m) {
70826 + error = -ENOMEM;
70827 + goto free_vma;
70828 + }
70829 + }
70830 +#endif
70831 +
70832 vma->vm_mm = mm;
70833 vma->vm_start = addr;
70834 vma->vm_end = addr + len;
70835 @@ -1266,8 +1397,9 @@ munmap_back:
70836 vma->vm_pgoff = pgoff;
70837 INIT_LIST_HEAD(&vma->anon_vma_chain);
70838
70839 + error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
70840 +
70841 if (file) {
70842 - error = -EINVAL;
70843 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
70844 goto free_vma;
70845 if (vm_flags & VM_DENYWRITE) {
70846 @@ -1281,6 +1413,19 @@ munmap_back:
70847 error = file->f_op->mmap(file, vma);
70848 if (error)
70849 goto unmap_and_free_vma;
70850 +
70851 +#ifdef CONFIG_PAX_SEGMEXEC
70852 + if (vma_m && (vm_flags & VM_EXECUTABLE))
70853 + added_exe_file_vma(mm);
70854 +#endif
70855 +
70856 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70857 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
70858 + vma->vm_flags |= VM_PAGEEXEC;
70859 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
70860 + }
70861 +#endif
70862 +
70863 if (vm_flags & VM_EXECUTABLE)
70864 added_exe_file_vma(mm);
70865
70866 @@ -1293,6 +1438,8 @@ munmap_back:
70867 pgoff = vma->vm_pgoff;
70868 vm_flags = vma->vm_flags;
70869 } else if (vm_flags & VM_SHARED) {
70870 + if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
70871 + goto free_vma;
70872 error = shmem_zero_setup(vma);
70873 if (error)
70874 goto free_vma;
70875 @@ -1316,6 +1463,11 @@ munmap_back:
70876 vma_link(mm, vma, prev, rb_link, rb_parent);
70877 file = vma->vm_file;
70878
70879 +#ifdef CONFIG_PAX_SEGMEXEC
70880 + if (vma_m)
70881 + BUG_ON(pax_mirror_vma(vma_m, vma));
70882 +#endif
70883 +
70884 /* Once vma denies write, undo our temporary denial count */
70885 if (correct_wcount)
70886 atomic_inc(&inode->i_writecount);
70887 @@ -1324,6 +1476,7 @@ out:
70888
70889 mm->total_vm += len >> PAGE_SHIFT;
70890 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
70891 + track_exec_limit(mm, addr, addr + len, vm_flags);
70892 if (vm_flags & VM_LOCKED) {
70893 if (!mlock_vma_pages_range(vma, addr, addr + len))
70894 mm->locked_vm += (len >> PAGE_SHIFT);
70895 @@ -1341,6 +1494,12 @@ unmap_and_free_vma:
70896 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
70897 charged = 0;
70898 free_vma:
70899 +
70900 +#ifdef CONFIG_PAX_SEGMEXEC
70901 + if (vma_m)
70902 + kmem_cache_free(vm_area_cachep, vma_m);
70903 +#endif
70904 +
70905 kmem_cache_free(vm_area_cachep, vma);
70906 unacct_error:
70907 if (charged)
70908 @@ -1348,6 +1507,44 @@ unacct_error:
70909 return error;
70910 }
70911
70912 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
70913 +{
70914 + if (!vma) {
70915 +#ifdef CONFIG_STACK_GROWSUP
70916 + if (addr > sysctl_heap_stack_gap)
70917 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
70918 + else
70919 + vma = find_vma(current->mm, 0);
70920 + if (vma && (vma->vm_flags & VM_GROWSUP))
70921 + return false;
70922 +#endif
70923 + return true;
70924 + }
70925 +
70926 + if (addr + len > vma->vm_start)
70927 + return false;
70928 +
70929 + if (vma->vm_flags & VM_GROWSDOWN)
70930 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
70931 +#ifdef CONFIG_STACK_GROWSUP
70932 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
70933 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
70934 +#endif
70935 +
70936 + return true;
70937 +}
70938 +
70939 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
70940 +{
70941 + if (vma->vm_start < len)
70942 + return -ENOMEM;
70943 + if (!(vma->vm_flags & VM_GROWSDOWN))
70944 + return vma->vm_start - len;
70945 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
70946 + return vma->vm_start - len - sysctl_heap_stack_gap;
70947 + return -ENOMEM;
70948 +}
70949 +
70950 /* Get an address range which is currently unmapped.
70951 * For shmat() with addr=0.
70952 *
70953 @@ -1374,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
70954 if (flags & MAP_FIXED)
70955 return addr;
70956
70957 +#ifdef CONFIG_PAX_RANDMMAP
70958 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
70959 +#endif
70960 +
70961 if (addr) {
70962 addr = PAGE_ALIGN(addr);
70963 - vma = find_vma(mm, addr);
70964 - if (TASK_SIZE - len >= addr &&
70965 - (!vma || addr + len <= vma->vm_start))
70966 - return addr;
70967 + if (TASK_SIZE - len >= addr) {
70968 + vma = find_vma(mm, addr);
70969 + if (check_heap_stack_gap(vma, addr, len))
70970 + return addr;
70971 + }
70972 }
70973 if (len > mm->cached_hole_size) {
70974 - start_addr = addr = mm->free_area_cache;
70975 + start_addr = addr = mm->free_area_cache;
70976 } else {
70977 - start_addr = addr = TASK_UNMAPPED_BASE;
70978 - mm->cached_hole_size = 0;
70979 + start_addr = addr = mm->mmap_base;
70980 + mm->cached_hole_size = 0;
70981 }
70982
70983 full_search:
70984 @@ -1396,34 +1598,40 @@ full_search:
70985 * Start a new search - just in case we missed
70986 * some holes.
70987 */
70988 - if (start_addr != TASK_UNMAPPED_BASE) {
70989 - addr = TASK_UNMAPPED_BASE;
70990 - start_addr = addr;
70991 + if (start_addr != mm->mmap_base) {
70992 + start_addr = addr = mm->mmap_base;
70993 mm->cached_hole_size = 0;
70994 goto full_search;
70995 }
70996 return -ENOMEM;
70997 }
70998 - if (!vma || addr + len <= vma->vm_start) {
70999 - /*
71000 - * Remember the place where we stopped the search:
71001 - */
71002 - mm->free_area_cache = addr + len;
71003 - return addr;
71004 - }
71005 + if (check_heap_stack_gap(vma, addr, len))
71006 + break;
71007 if (addr + mm->cached_hole_size < vma->vm_start)
71008 mm->cached_hole_size = vma->vm_start - addr;
71009 addr = vma->vm_end;
71010 }
71011 +
71012 + /*
71013 + * Remember the place where we stopped the search:
71014 + */
71015 + mm->free_area_cache = addr + len;
71016 + return addr;
71017 }
71018 #endif
71019
71020 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71021 {
71022 +
71023 +#ifdef CONFIG_PAX_SEGMEXEC
71024 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71025 + return;
71026 +#endif
71027 +
71028 /*
71029 * Is this a new hole at the lowest possible address?
71030 */
71031 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
71032 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
71033 mm->free_area_cache = addr;
71034 mm->cached_hole_size = ~0UL;
71035 }
71036 @@ -1441,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71037 {
71038 struct vm_area_struct *vma;
71039 struct mm_struct *mm = current->mm;
71040 - unsigned long addr = addr0;
71041 + unsigned long base = mm->mmap_base, addr = addr0;
71042
71043 /* requested length too big for entire address space */
71044 if (len > TASK_SIZE)
71045 @@ -1450,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71046 if (flags & MAP_FIXED)
71047 return addr;
71048
71049 +#ifdef CONFIG_PAX_RANDMMAP
71050 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71051 +#endif
71052 +
71053 /* requesting a specific address */
71054 if (addr) {
71055 addr = PAGE_ALIGN(addr);
71056 - vma = find_vma(mm, addr);
71057 - if (TASK_SIZE - len >= addr &&
71058 - (!vma || addr + len <= vma->vm_start))
71059 - return addr;
71060 + if (TASK_SIZE - len >= addr) {
71061 + vma = find_vma(mm, addr);
71062 + if (check_heap_stack_gap(vma, addr, len))
71063 + return addr;
71064 + }
71065 }
71066
71067 /* check if free_area_cache is useful for us */
71068 @@ -1471,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71069 /* make sure it can fit in the remaining address space */
71070 if (addr > len) {
71071 vma = find_vma(mm, addr-len);
71072 - if (!vma || addr <= vma->vm_start)
71073 + if (check_heap_stack_gap(vma, addr - len, len))
71074 /* remember the address as a hint for next time */
71075 return (mm->free_area_cache = addr-len);
71076 }
71077 @@ -1488,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71078 * return with success:
71079 */
71080 vma = find_vma(mm, addr);
71081 - if (!vma || addr+len <= vma->vm_start)
71082 + if (check_heap_stack_gap(vma, addr, len))
71083 /* remember the address as a hint for next time */
71084 return (mm->free_area_cache = addr);
71085
71086 @@ -1497,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71087 mm->cached_hole_size = vma->vm_start - addr;
71088
71089 /* try just below the current vma->vm_start */
71090 - addr = vma->vm_start-len;
71091 - } while (len < vma->vm_start);
71092 + addr = skip_heap_stack_gap(vma, len);
71093 + } while (!IS_ERR_VALUE(addr));
71094
71095 bottomup:
71096 /*
71097 @@ -1507,13 +1720,21 @@ bottomup:
71098 * can happen with large stack limits and large mmap()
71099 * allocations.
71100 */
71101 + mm->mmap_base = TASK_UNMAPPED_BASE;
71102 +
71103 +#ifdef CONFIG_PAX_RANDMMAP
71104 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71105 + mm->mmap_base += mm->delta_mmap;
71106 +#endif
71107 +
71108 + mm->free_area_cache = mm->mmap_base;
71109 mm->cached_hole_size = ~0UL;
71110 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71111 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71112 /*
71113 * Restore the topdown base:
71114 */
71115 - mm->free_area_cache = mm->mmap_base;
71116 + mm->mmap_base = base;
71117 + mm->free_area_cache = base;
71118 mm->cached_hole_size = ~0UL;
71119
71120 return addr;
71121 @@ -1522,6 +1743,12 @@ bottomup:
71122
71123 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71124 {
71125 +
71126 +#ifdef CONFIG_PAX_SEGMEXEC
71127 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71128 + return;
71129 +#endif
71130 +
71131 /*
71132 * Is this a new hole at the highest possible address?
71133 */
71134 @@ -1529,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71135 mm->free_area_cache = addr;
71136
71137 /* dont allow allocations above current base */
71138 - if (mm->free_area_cache > mm->mmap_base)
71139 + if (mm->free_area_cache > mm->mmap_base) {
71140 mm->free_area_cache = mm->mmap_base;
71141 + mm->cached_hole_size = ~0UL;
71142 + }
71143 }
71144
71145 unsigned long
71146 @@ -1603,40 +1832,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
71147
71148 EXPORT_SYMBOL(find_vma);
71149
71150 -/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
71151 +/*
71152 + * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
71153 + */
71154 struct vm_area_struct *
71155 find_vma_prev(struct mm_struct *mm, unsigned long addr,
71156 struct vm_area_struct **pprev)
71157 {
71158 - struct vm_area_struct *vma = NULL, *prev = NULL;
71159 - struct rb_node *rb_node;
71160 - if (!mm)
71161 - goto out;
71162 -
71163 - /* Guard against addr being lower than the first VMA */
71164 - vma = mm->mmap;
71165 -
71166 - /* Go through the RB tree quickly. */
71167 - rb_node = mm->mm_rb.rb_node;
71168 -
71169 - while (rb_node) {
71170 - struct vm_area_struct *vma_tmp;
71171 - vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
71172 -
71173 - if (addr < vma_tmp->vm_end) {
71174 - rb_node = rb_node->rb_left;
71175 - } else {
71176 - prev = vma_tmp;
71177 - if (!prev->vm_next || (addr < prev->vm_next->vm_end))
71178 - break;
71179 + struct vm_area_struct *vma;
71180 +
71181 + vma = find_vma(mm, addr);
71182 + if (vma) {
71183 + *pprev = vma->vm_prev;
71184 + } else {
71185 + struct rb_node *rb_node = mm->mm_rb.rb_node;
71186 + *pprev = NULL;
71187 + while (rb_node) {
71188 + *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
71189 rb_node = rb_node->rb_right;
71190 }
71191 }
71192 + return vma;
71193 +}
71194 +
71195 +#ifdef CONFIG_PAX_SEGMEXEC
71196 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71197 +{
71198 + struct vm_area_struct *vma_m;
71199
71200 -out:
71201 - *pprev = prev;
71202 - return prev ? prev->vm_next : vma;
71203 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71204 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71205 + BUG_ON(vma->vm_mirror);
71206 + return NULL;
71207 + }
71208 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71209 + vma_m = vma->vm_mirror;
71210 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71211 + BUG_ON(vma->vm_file != vma_m->vm_file);
71212 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71213 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71214 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71215 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71216 + return vma_m;
71217 }
71218 +#endif
71219
71220 /*
71221 * Verify that the stack growth is acceptable and
71222 @@ -1654,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71223 return -ENOMEM;
71224
71225 /* Stack limit test */
71226 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71227 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71228 return -ENOMEM;
71229
71230 @@ -1664,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71231 locked = mm->locked_vm + grow;
71232 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71233 limit >>= PAGE_SHIFT;
71234 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71235 if (locked > limit && !capable(CAP_IPC_LOCK))
71236 return -ENOMEM;
71237 }
71238 @@ -1694,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71239 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71240 * vma is the last one with address > vma->vm_end. Have to extend vma.
71241 */
71242 +#ifndef CONFIG_IA64
71243 +static
71244 +#endif
71245 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71246 {
71247 int error;
71248 + bool locknext;
71249
71250 if (!(vma->vm_flags & VM_GROWSUP))
71251 return -EFAULT;
71252
71253 + /* Also guard against wrapping around to address 0. */
71254 + if (address < PAGE_ALIGN(address+1))
71255 + address = PAGE_ALIGN(address+1);
71256 + else
71257 + return -ENOMEM;
71258 +
71259 /*
71260 * We must make sure the anon_vma is allocated
71261 * so that the anon_vma locking is not a noop.
71262 */
71263 if (unlikely(anon_vma_prepare(vma)))
71264 return -ENOMEM;
71265 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71266 + if (locknext && anon_vma_prepare(vma->vm_next))
71267 + return -ENOMEM;
71268 vma_lock_anon_vma(vma);
71269 + if (locknext)
71270 + vma_lock_anon_vma(vma->vm_next);
71271
71272 /*
71273 * vma->vm_start/vm_end cannot change under us because the caller
71274 * is required to hold the mmap_sem in read mode. We need the
71275 - * anon_vma lock to serialize against concurrent expand_stacks.
71276 - * Also guard against wrapping around to address 0.
71277 + * anon_vma locks to serialize against concurrent expand_stacks
71278 + * and expand_upwards.
71279 */
71280 - if (address < PAGE_ALIGN(address+4))
71281 - address = PAGE_ALIGN(address+4);
71282 - else {
71283 - vma_unlock_anon_vma(vma);
71284 - return -ENOMEM;
71285 - }
71286 error = 0;
71287
71288 /* Somebody else might have raced and expanded it already */
71289 - if (address > vma->vm_end) {
71290 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71291 + error = -ENOMEM;
71292 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71293 unsigned long size, grow;
71294
71295 size = address - vma->vm_start;
71296 @@ -1739,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71297 }
71298 }
71299 }
71300 + if (locknext)
71301 + vma_unlock_anon_vma(vma->vm_next);
71302 vma_unlock_anon_vma(vma);
71303 khugepaged_enter_vma_merge(vma);
71304 return error;
71305 @@ -1752,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
71306 unsigned long address)
71307 {
71308 int error;
71309 + bool lockprev = false;
71310 + struct vm_area_struct *prev;
71311
71312 /*
71313 * We must make sure the anon_vma is allocated
71314 @@ -1765,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
71315 if (error)
71316 return error;
71317
71318 + prev = vma->vm_prev;
71319 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71320 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71321 +#endif
71322 + if (lockprev && anon_vma_prepare(prev))
71323 + return -ENOMEM;
71324 + if (lockprev)
71325 + vma_lock_anon_vma(prev);
71326 +
71327 vma_lock_anon_vma(vma);
71328
71329 /*
71330 @@ -1774,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
71331 */
71332
71333 /* Somebody else might have raced and expanded it already */
71334 - if (address < vma->vm_start) {
71335 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71336 + error = -ENOMEM;
71337 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71338 unsigned long size, grow;
71339
71340 +#ifdef CONFIG_PAX_SEGMEXEC
71341 + struct vm_area_struct *vma_m;
71342 +
71343 + vma_m = pax_find_mirror_vma(vma);
71344 +#endif
71345 +
71346 size = vma->vm_end - address;
71347 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71348
71349 @@ -1786,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
71350 if (!error) {
71351 vma->vm_start = address;
71352 vma->vm_pgoff -= grow;
71353 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71354 +
71355 +#ifdef CONFIG_PAX_SEGMEXEC
71356 + if (vma_m) {
71357 + vma_m->vm_start -= grow << PAGE_SHIFT;
71358 + vma_m->vm_pgoff -= grow;
71359 + }
71360 +#endif
71361 +
71362 perf_event_mmap(vma);
71363 }
71364 }
71365 }
71366 vma_unlock_anon_vma(vma);
71367 + if (lockprev)
71368 + vma_unlock_anon_vma(prev);
71369 khugepaged_enter_vma_merge(vma);
71370 return error;
71371 }
71372 @@ -1860,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71373 do {
71374 long nrpages = vma_pages(vma);
71375
71376 +#ifdef CONFIG_PAX_SEGMEXEC
71377 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71378 + vma = remove_vma(vma);
71379 + continue;
71380 + }
71381 +#endif
71382 +
71383 mm->total_vm -= nrpages;
71384 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71385 vma = remove_vma(vma);
71386 @@ -1905,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71387 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71388 vma->vm_prev = NULL;
71389 do {
71390 +
71391 +#ifdef CONFIG_PAX_SEGMEXEC
71392 + if (vma->vm_mirror) {
71393 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71394 + vma->vm_mirror->vm_mirror = NULL;
71395 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
71396 + vma->vm_mirror = NULL;
71397 + }
71398 +#endif
71399 +
71400 rb_erase(&vma->vm_rb, &mm->mm_rb);
71401 mm->map_count--;
71402 tail_vma = vma;
71403 @@ -1933,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71404 struct vm_area_struct *new;
71405 int err = -ENOMEM;
71406
71407 +#ifdef CONFIG_PAX_SEGMEXEC
71408 + struct vm_area_struct *vma_m, *new_m = NULL;
71409 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71410 +#endif
71411 +
71412 if (is_vm_hugetlb_page(vma) && (addr &
71413 ~(huge_page_mask(hstate_vma(vma)))))
71414 return -EINVAL;
71415
71416 +#ifdef CONFIG_PAX_SEGMEXEC
71417 + vma_m = pax_find_mirror_vma(vma);
71418 +#endif
71419 +
71420 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71421 if (!new)
71422 goto out_err;
71423
71424 +#ifdef CONFIG_PAX_SEGMEXEC
71425 + if (vma_m) {
71426 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71427 + if (!new_m) {
71428 + kmem_cache_free(vm_area_cachep, new);
71429 + goto out_err;
71430 + }
71431 + }
71432 +#endif
71433 +
71434 /* most fields are the same, copy all, and then fixup */
71435 *new = *vma;
71436
71437 @@ -1953,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71438 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71439 }
71440
71441 +#ifdef CONFIG_PAX_SEGMEXEC
71442 + if (vma_m) {
71443 + *new_m = *vma_m;
71444 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71445 + new_m->vm_mirror = new;
71446 + new->vm_mirror = new_m;
71447 +
71448 + if (new_below)
71449 + new_m->vm_end = addr_m;
71450 + else {
71451 + new_m->vm_start = addr_m;
71452 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71453 + }
71454 + }
71455 +#endif
71456 +
71457 pol = mpol_dup(vma_policy(vma));
71458 if (IS_ERR(pol)) {
71459 err = PTR_ERR(pol);
71460 @@ -1978,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71461 else
71462 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71463
71464 +#ifdef CONFIG_PAX_SEGMEXEC
71465 + if (!err && vma_m) {
71466 + if (anon_vma_clone(new_m, vma_m))
71467 + goto out_free_mpol;
71468 +
71469 + mpol_get(pol);
71470 + vma_set_policy(new_m, pol);
71471 +
71472 + if (new_m->vm_file) {
71473 + get_file(new_m->vm_file);
71474 + if (vma_m->vm_flags & VM_EXECUTABLE)
71475 + added_exe_file_vma(mm);
71476 + }
71477 +
71478 + if (new_m->vm_ops && new_m->vm_ops->open)
71479 + new_m->vm_ops->open(new_m);
71480 +
71481 + if (new_below)
71482 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71483 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71484 + else
71485 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71486 +
71487 + if (err) {
71488 + if (new_m->vm_ops && new_m->vm_ops->close)
71489 + new_m->vm_ops->close(new_m);
71490 + if (new_m->vm_file) {
71491 + if (vma_m->vm_flags & VM_EXECUTABLE)
71492 + removed_exe_file_vma(mm);
71493 + fput(new_m->vm_file);
71494 + }
71495 + mpol_put(pol);
71496 + }
71497 + }
71498 +#endif
71499 +
71500 /* Success. */
71501 if (!err)
71502 return 0;
71503 @@ -1990,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71504 removed_exe_file_vma(mm);
71505 fput(new->vm_file);
71506 }
71507 - unlink_anon_vmas(new);
71508 out_free_mpol:
71509 mpol_put(pol);
71510 out_free_vma:
71511 +
71512 +#ifdef CONFIG_PAX_SEGMEXEC
71513 + if (new_m) {
71514 + unlink_anon_vmas(new_m);
71515 + kmem_cache_free(vm_area_cachep, new_m);
71516 + }
71517 +#endif
71518 +
71519 + unlink_anon_vmas(new);
71520 kmem_cache_free(vm_area_cachep, new);
71521 out_err:
71522 return err;
71523 @@ -2006,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71524 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71525 unsigned long addr, int new_below)
71526 {
71527 +
71528 +#ifdef CONFIG_PAX_SEGMEXEC
71529 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71530 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71531 + if (mm->map_count >= sysctl_max_map_count-1)
71532 + return -ENOMEM;
71533 + } else
71534 +#endif
71535 +
71536 if (mm->map_count >= sysctl_max_map_count)
71537 return -ENOMEM;
71538
71539 @@ -2017,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71540 * work. This now handles partial unmappings.
71541 * Jeremy Fitzhardinge <jeremy@goop.org>
71542 */
71543 +#ifdef CONFIG_PAX_SEGMEXEC
71544 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71545 {
71546 + int ret = __do_munmap(mm, start, len);
71547 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71548 + return ret;
71549 +
71550 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71551 +}
71552 +
71553 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71554 +#else
71555 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71556 +#endif
71557 +{
71558 unsigned long end;
71559 struct vm_area_struct *vma, *prev, *last;
71560
71561 + /*
71562 + * mm->mmap_sem is required to protect against another thread
71563 + * changing the mappings in case we sleep.
71564 + */
71565 + verify_mm_writelocked(mm);
71566 +
71567 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71568 return -EINVAL;
71569
71570 @@ -2096,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71571 /* Fix up all other VM information */
71572 remove_vma_list(mm, vma);
71573
71574 + track_exec_limit(mm, start, end, 0UL);
71575 +
71576 return 0;
71577 }
71578
71579 @@ -2108,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71580
71581 profile_munmap(addr);
71582
71583 +#ifdef CONFIG_PAX_SEGMEXEC
71584 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71585 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
71586 + return -EINVAL;
71587 +#endif
71588 +
71589 down_write(&mm->mmap_sem);
71590 ret = do_munmap(mm, addr, len);
71591 up_write(&mm->mmap_sem);
71592 return ret;
71593 }
71594
71595 -static inline void verify_mm_writelocked(struct mm_struct *mm)
71596 -{
71597 -#ifdef CONFIG_DEBUG_VM
71598 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71599 - WARN_ON(1);
71600 - up_read(&mm->mmap_sem);
71601 - }
71602 -#endif
71603 -}
71604 -
71605 /*
71606 * this is really a simplified "do_mmap". it only handles
71607 * anonymous maps. eventually we may be able to do some
71608 @@ -2137,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71609 struct rb_node ** rb_link, * rb_parent;
71610 pgoff_t pgoff = addr >> PAGE_SHIFT;
71611 int error;
71612 + unsigned long charged;
71613
71614 len = PAGE_ALIGN(len);
71615 if (!len)
71616 @@ -2148,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71617
71618 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71619
71620 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71621 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71622 + flags &= ~VM_EXEC;
71623 +
71624 +#ifdef CONFIG_PAX_MPROTECT
71625 + if (mm->pax_flags & MF_PAX_MPROTECT)
71626 + flags &= ~VM_MAYEXEC;
71627 +#endif
71628 +
71629 + }
71630 +#endif
71631 +
71632 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71633 if (error & ~PAGE_MASK)
71634 return error;
71635
71636 + charged = len >> PAGE_SHIFT;
71637 +
71638 /*
71639 * mlock MCL_FUTURE?
71640 */
71641 if (mm->def_flags & VM_LOCKED) {
71642 unsigned long locked, lock_limit;
71643 - locked = len >> PAGE_SHIFT;
71644 + locked = charged;
71645 locked += mm->locked_vm;
71646 lock_limit = rlimit(RLIMIT_MEMLOCK);
71647 lock_limit >>= PAGE_SHIFT;
71648 @@ -2174,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71649 /*
71650 * Clear old maps. this also does some error checking for us
71651 */
71652 - munmap_back:
71653 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71654 if (vma && vma->vm_start < addr + len) {
71655 if (do_munmap(mm, addr, len))
71656 return -ENOMEM;
71657 - goto munmap_back;
71658 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71659 + BUG_ON(vma && vma->vm_start < addr + len);
71660 }
71661
71662 /* Check against address space limits *after* clearing old maps... */
71663 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71664 + if (!may_expand_vm(mm, charged))
71665 return -ENOMEM;
71666
71667 if (mm->map_count > sysctl_max_map_count)
71668 return -ENOMEM;
71669
71670 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
71671 + if (security_vm_enough_memory(charged))
71672 return -ENOMEM;
71673
71674 /* Can we just expand an old private anonymous mapping? */
71675 @@ -2203,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71676 */
71677 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71678 if (!vma) {
71679 - vm_unacct_memory(len >> PAGE_SHIFT);
71680 + vm_unacct_memory(charged);
71681 return -ENOMEM;
71682 }
71683
71684 @@ -2217,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71685 vma_link(mm, vma, prev, rb_link, rb_parent);
71686 out:
71687 perf_event_mmap(vma);
71688 - mm->total_vm += len >> PAGE_SHIFT;
71689 + mm->total_vm += charged;
71690 if (flags & VM_LOCKED) {
71691 if (!mlock_vma_pages_range(vma, addr, addr + len))
71692 - mm->locked_vm += (len >> PAGE_SHIFT);
71693 + mm->locked_vm += charged;
71694 }
71695 + track_exec_limit(mm, addr, addr + len, flags);
71696 return addr;
71697 }
71698
71699 @@ -2268,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
71700 * Walk the list again, actually closing and freeing it,
71701 * with preemption enabled, without holding any MM locks.
71702 */
71703 - while (vma)
71704 + while (vma) {
71705 + vma->vm_mirror = NULL;
71706 vma = remove_vma(vma);
71707 + }
71708
71709 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
71710 }
71711 @@ -2283,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71712 struct vm_area_struct * __vma, * prev;
71713 struct rb_node ** rb_link, * rb_parent;
71714
71715 +#ifdef CONFIG_PAX_SEGMEXEC
71716 + struct vm_area_struct *vma_m = NULL;
71717 +#endif
71718 +
71719 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
71720 + return -EPERM;
71721 +
71722 /*
71723 * The vm_pgoff of a purely anonymous vma should be irrelevant
71724 * until its first write fault, when page's anon_vma and index
71725 @@ -2305,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
71726 if ((vma->vm_flags & VM_ACCOUNT) &&
71727 security_vm_enough_memory_mm(mm, vma_pages(vma)))
71728 return -ENOMEM;
71729 +
71730 +#ifdef CONFIG_PAX_SEGMEXEC
71731 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
71732 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71733 + if (!vma_m)
71734 + return -ENOMEM;
71735 + }
71736 +#endif
71737 +
71738 vma_link(mm, vma, prev, rb_link, rb_parent);
71739 +
71740 +#ifdef CONFIG_PAX_SEGMEXEC
71741 + if (vma_m)
71742 + BUG_ON(pax_mirror_vma(vma_m, vma));
71743 +#endif
71744 +
71745 return 0;
71746 }
71747
71748 @@ -2323,6 +2769,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71749 struct rb_node **rb_link, *rb_parent;
71750 struct mempolicy *pol;
71751
71752 + BUG_ON(vma->vm_mirror);
71753 +
71754 /*
71755 * If anonymous vma has not yet been faulted, update new pgoff
71756 * to match new location, to increase its chance of merging.
71757 @@ -2373,6 +2821,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
71758 return NULL;
71759 }
71760
71761 +#ifdef CONFIG_PAX_SEGMEXEC
71762 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
71763 +{
71764 + struct vm_area_struct *prev_m;
71765 + struct rb_node **rb_link_m, *rb_parent_m;
71766 + struct mempolicy *pol_m;
71767 +
71768 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
71769 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
71770 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
71771 + *vma_m = *vma;
71772 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
71773 + if (anon_vma_clone(vma_m, vma))
71774 + return -ENOMEM;
71775 + pol_m = vma_policy(vma_m);
71776 + mpol_get(pol_m);
71777 + vma_set_policy(vma_m, pol_m);
71778 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
71779 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
71780 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
71781 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
71782 + if (vma_m->vm_file)
71783 + get_file(vma_m->vm_file);
71784 + if (vma_m->vm_ops && vma_m->vm_ops->open)
71785 + vma_m->vm_ops->open(vma_m);
71786 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
71787 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
71788 + vma_m->vm_mirror = vma;
71789 + vma->vm_mirror = vma_m;
71790 + return 0;
71791 +}
71792 +#endif
71793 +
71794 /*
71795 * Return true if the calling process may expand its vm space by the passed
71796 * number of pages
71797 @@ -2383,7 +2864,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
71798 unsigned long lim;
71799
71800 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
71801 -
71802 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
71803 if (cur + npages > lim)
71804 return 0;
71805 return 1;
71806 @@ -2454,6 +2935,22 @@ int install_special_mapping(struct mm_struct *mm,
71807 vma->vm_start = addr;
71808 vma->vm_end = addr + len;
71809
71810 +#ifdef CONFIG_PAX_MPROTECT
71811 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71812 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71813 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
71814 + return -EPERM;
71815 + if (!(vm_flags & VM_EXEC))
71816 + vm_flags &= ~VM_MAYEXEC;
71817 +#else
71818 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71819 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71820 +#endif
71821 + else
71822 + vm_flags &= ~VM_MAYWRITE;
71823 + }
71824 +#endif
71825 +
71826 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
71827 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71828
71829 diff --git a/mm/mprotect.c b/mm/mprotect.c
71830 index 5a688a2..27e031c 100644
71831 --- a/mm/mprotect.c
71832 +++ b/mm/mprotect.c
71833 @@ -23,10 +23,16 @@
71834 #include <linux/mmu_notifier.h>
71835 #include <linux/migrate.h>
71836 #include <linux/perf_event.h>
71837 +
71838 +#ifdef CONFIG_PAX_MPROTECT
71839 +#include <linux/elf.h>
71840 +#endif
71841 +
71842 #include <asm/uaccess.h>
71843 #include <asm/pgtable.h>
71844 #include <asm/cacheflush.h>
71845 #include <asm/tlbflush.h>
71846 +#include <asm/mmu_context.h>
71847
71848 #ifndef pgprot_modify
71849 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
71850 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
71851 flush_tlb_range(vma, start, end);
71852 }
71853
71854 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
71855 +/* called while holding the mmap semaphor for writing except stack expansion */
71856 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
71857 +{
71858 + unsigned long oldlimit, newlimit = 0UL;
71859 +
71860 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
71861 + return;
71862 +
71863 + spin_lock(&mm->page_table_lock);
71864 + oldlimit = mm->context.user_cs_limit;
71865 + if ((prot & VM_EXEC) && oldlimit < end)
71866 + /* USER_CS limit moved up */
71867 + newlimit = end;
71868 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
71869 + /* USER_CS limit moved down */
71870 + newlimit = start;
71871 +
71872 + if (newlimit) {
71873 + mm->context.user_cs_limit = newlimit;
71874 +
71875 +#ifdef CONFIG_SMP
71876 + wmb();
71877 + cpus_clear(mm->context.cpu_user_cs_mask);
71878 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
71879 +#endif
71880 +
71881 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
71882 + }
71883 + spin_unlock(&mm->page_table_lock);
71884 + if (newlimit == end) {
71885 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
71886 +
71887 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
71888 + if (is_vm_hugetlb_page(vma))
71889 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
71890 + else
71891 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
71892 + }
71893 +}
71894 +#endif
71895 +
71896 int
71897 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71898 unsigned long start, unsigned long end, unsigned long newflags)
71899 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71900 int error;
71901 int dirty_accountable = 0;
71902
71903 +#ifdef CONFIG_PAX_SEGMEXEC
71904 + struct vm_area_struct *vma_m = NULL;
71905 + unsigned long start_m, end_m;
71906 +
71907 + start_m = start + SEGMEXEC_TASK_SIZE;
71908 + end_m = end + SEGMEXEC_TASK_SIZE;
71909 +#endif
71910 +
71911 if (newflags == oldflags) {
71912 *pprev = vma;
71913 return 0;
71914 }
71915
71916 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
71917 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
71918 +
71919 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
71920 + return -ENOMEM;
71921 +
71922 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
71923 + return -ENOMEM;
71924 + }
71925 +
71926 /*
71927 * If we make a private mapping writable we increase our commit;
71928 * but (without finer accounting) cannot reduce our commit if we
71929 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
71930 }
71931 }
71932
71933 +#ifdef CONFIG_PAX_SEGMEXEC
71934 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
71935 + if (start != vma->vm_start) {
71936 + error = split_vma(mm, vma, start, 1);
71937 + if (error)
71938 + goto fail;
71939 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
71940 + *pprev = (*pprev)->vm_next;
71941 + }
71942 +
71943 + if (end != vma->vm_end) {
71944 + error = split_vma(mm, vma, end, 0);
71945 + if (error)
71946 + goto fail;
71947 + }
71948 +
71949 + if (pax_find_mirror_vma(vma)) {
71950 + error = __do_munmap(mm, start_m, end_m - start_m);
71951 + if (error)
71952 + goto fail;
71953 + } else {
71954 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71955 + if (!vma_m) {
71956 + error = -ENOMEM;
71957 + goto fail;
71958 + }
71959 + vma->vm_flags = newflags;
71960 + error = pax_mirror_vma(vma_m, vma);
71961 + if (error) {
71962 + vma->vm_flags = oldflags;
71963 + goto fail;
71964 + }
71965 + }
71966 + }
71967 +#endif
71968 +
71969 /*
71970 * First try to merge with previous and/or next vma.
71971 */
71972 @@ -204,9 +306,21 @@ success:
71973 * vm_flags and vm_page_prot are protected by the mmap_sem
71974 * held in write mode.
71975 */
71976 +
71977 +#ifdef CONFIG_PAX_SEGMEXEC
71978 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
71979 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
71980 +#endif
71981 +
71982 vma->vm_flags = newflags;
71983 +
71984 +#ifdef CONFIG_PAX_MPROTECT
71985 + if (mm->binfmt && mm->binfmt->handle_mprotect)
71986 + mm->binfmt->handle_mprotect(vma, newflags);
71987 +#endif
71988 +
71989 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
71990 - vm_get_page_prot(newflags));
71991 + vm_get_page_prot(vma->vm_flags));
71992
71993 if (vma_wants_writenotify(vma)) {
71994 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
71995 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
71996 end = start + len;
71997 if (end <= start)
71998 return -ENOMEM;
71999 +
72000 +#ifdef CONFIG_PAX_SEGMEXEC
72001 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72002 + if (end > SEGMEXEC_TASK_SIZE)
72003 + return -EINVAL;
72004 + } else
72005 +#endif
72006 +
72007 + if (end > TASK_SIZE)
72008 + return -EINVAL;
72009 +
72010 if (!arch_validate_prot(prot))
72011 return -EINVAL;
72012
72013 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72014 /*
72015 * Does the application expect PROT_READ to imply PROT_EXEC:
72016 */
72017 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72018 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72019 prot |= PROT_EXEC;
72020
72021 vm_flags = calc_vm_prot_bits(prot);
72022 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72023 if (start > vma->vm_start)
72024 prev = vma;
72025
72026 +#ifdef CONFIG_PAX_MPROTECT
72027 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72028 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72029 +#endif
72030 +
72031 for (nstart = start ; ; ) {
72032 unsigned long newflags;
72033
72034 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72035
72036 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72037 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72038 + if (prot & (PROT_WRITE | PROT_EXEC))
72039 + gr_log_rwxmprotect(vma->vm_file);
72040 +
72041 + error = -EACCES;
72042 + goto out;
72043 + }
72044 +
72045 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72046 error = -EACCES;
72047 goto out;
72048 }
72049 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72050 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72051 if (error)
72052 goto out;
72053 +
72054 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72055 +
72056 nstart = tmp;
72057
72058 if (nstart < prev->vm_end)
72059 diff --git a/mm/mremap.c b/mm/mremap.c
72060 index d6959cb..18a402a 100644
72061 --- a/mm/mremap.c
72062 +++ b/mm/mremap.c
72063 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72064 continue;
72065 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72066 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72067 +
72068 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72069 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72070 + pte = pte_exprotect(pte);
72071 +#endif
72072 +
72073 set_pte_at(mm, new_addr, new_pte, pte);
72074 }
72075
72076 @@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72077 if (is_vm_hugetlb_page(vma))
72078 goto Einval;
72079
72080 +#ifdef CONFIG_PAX_SEGMEXEC
72081 + if (pax_find_mirror_vma(vma))
72082 + goto Einval;
72083 +#endif
72084 +
72085 /* We can't remap across vm area boundaries */
72086 if (old_len > vma->vm_end - addr)
72087 goto Efault;
72088 @@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
72089 unsigned long ret = -EINVAL;
72090 unsigned long charged = 0;
72091 unsigned long map_flags;
72092 + unsigned long pax_task_size = TASK_SIZE;
72093
72094 if (new_addr & ~PAGE_MASK)
72095 goto out;
72096
72097 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72098 +#ifdef CONFIG_PAX_SEGMEXEC
72099 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72100 + pax_task_size = SEGMEXEC_TASK_SIZE;
72101 +#endif
72102 +
72103 + pax_task_size -= PAGE_SIZE;
72104 +
72105 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72106 goto out;
72107
72108 /* Check if the location we're moving into overlaps the
72109 * old location at all, and fail if it does.
72110 */
72111 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72112 - goto out;
72113 -
72114 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72115 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72116 goto out;
72117
72118 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72119 @@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
72120 struct vm_area_struct *vma;
72121 unsigned long ret = -EINVAL;
72122 unsigned long charged = 0;
72123 + unsigned long pax_task_size = TASK_SIZE;
72124
72125 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72126 goto out;
72127 @@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
72128 if (!new_len)
72129 goto out;
72130
72131 +#ifdef CONFIG_PAX_SEGMEXEC
72132 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72133 + pax_task_size = SEGMEXEC_TASK_SIZE;
72134 +#endif
72135 +
72136 + pax_task_size -= PAGE_SIZE;
72137 +
72138 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72139 + old_len > pax_task_size || addr > pax_task_size-old_len)
72140 + goto out;
72141 +
72142 if (flags & MREMAP_FIXED) {
72143 if (flags & MREMAP_MAYMOVE)
72144 ret = mremap_to(addr, old_len, new_addr, new_len);
72145 @@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
72146 addr + new_len);
72147 }
72148 ret = addr;
72149 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72150 goto out;
72151 }
72152 }
72153 @@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
72154 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72155 if (ret)
72156 goto out;
72157 +
72158 + map_flags = vma->vm_flags;
72159 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72160 + if (!(ret & ~PAGE_MASK)) {
72161 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72162 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72163 + }
72164 }
72165 out:
72166 if (ret & ~PAGE_MASK)
72167 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
72168 index 7fa41b4..6087460 100644
72169 --- a/mm/nobootmem.c
72170 +++ b/mm/nobootmem.c
72171 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
72172 unsigned long __init free_all_memory_core_early(int nodeid)
72173 {
72174 int i;
72175 - u64 start, end;
72176 + u64 start, end, startrange, endrange;
72177 unsigned long count = 0;
72178 - struct range *range = NULL;
72179 + struct range *range = NULL, rangerange = { 0, 0 };
72180 int nr_range;
72181
72182 nr_range = get_free_all_memory_range(&range, nodeid);
72183 + startrange = __pa(range) >> PAGE_SHIFT;
72184 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
72185
72186 for (i = 0; i < nr_range; i++) {
72187 start = range[i].start;
72188 end = range[i].end;
72189 + if (start <= endrange && startrange < end) {
72190 + BUG_ON(rangerange.start | rangerange.end);
72191 + rangerange = range[i];
72192 + continue;
72193 + }
72194 count += end - start;
72195 __free_pages_memory(start, end);
72196 }
72197 + start = rangerange.start;
72198 + end = rangerange.end;
72199 + count += end - start;
72200 + __free_pages_memory(start, end);
72201
72202 return count;
72203 }
72204 diff --git a/mm/nommu.c b/mm/nommu.c
72205 index f59e170..34e2a2b 100644
72206 --- a/mm/nommu.c
72207 +++ b/mm/nommu.c
72208 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72209 int sysctl_overcommit_ratio = 50; /* default is 50% */
72210 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72211 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72212 -int heap_stack_gap = 0;
72213
72214 atomic_long_t mmap_pages_allocated;
72215
72216 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72217 EXPORT_SYMBOL(find_vma);
72218
72219 /*
72220 - * find a VMA
72221 - * - we don't extend stack VMAs under NOMMU conditions
72222 - */
72223 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72224 -{
72225 - return find_vma(mm, addr);
72226 -}
72227 -
72228 -/*
72229 * expand a stack to a given address
72230 * - not supported under NOMMU conditions
72231 */
72232 @@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72233
72234 /* most fields are the same, copy all, and then fixup */
72235 *new = *vma;
72236 + INIT_LIST_HEAD(&new->anon_vma_chain);
72237 *region = *vma->vm_region;
72238 new->vm_region = region;
72239
72240 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72241 index 485be89..c059ad3 100644
72242 --- a/mm/page_alloc.c
72243 +++ b/mm/page_alloc.c
72244 @@ -341,7 +341,7 @@ out:
72245 * This usage means that zero-order pages may not be compound.
72246 */
72247
72248 -static void free_compound_page(struct page *page)
72249 +void free_compound_page(struct page *page)
72250 {
72251 __free_pages_ok(page, compound_order(page));
72252 }
72253 @@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72254 int i;
72255 int bad = 0;
72256
72257 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72258 + unsigned long index = 1UL << order;
72259 +#endif
72260 +
72261 trace_mm_page_free_direct(page, order);
72262 kmemcheck_free_shadow(page, order);
72263
72264 @@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72265 debug_check_no_obj_freed(page_address(page),
72266 PAGE_SIZE << order);
72267 }
72268 +
72269 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72270 + for (; index; --index)
72271 + sanitize_highpage(page + index - 1);
72272 +#endif
72273 +
72274 arch_free_page(page, order);
72275 kernel_map_pages(page, 1 << order, 0);
72276
72277 @@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72278 arch_alloc_page(page, order);
72279 kernel_map_pages(page, 1 << order, 1);
72280
72281 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72282 if (gfp_flags & __GFP_ZERO)
72283 prep_zero_page(page, order, gfp_flags);
72284 +#endif
72285
72286 if (order && (gfp_flags & __GFP_COMP))
72287 prep_compound_page(page, order);
72288 @@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72289 unsigned long pfn;
72290
72291 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72292 +#ifdef CONFIG_X86_32
72293 + /* boot failures in VMware 8 on 32bit vanilla since
72294 + this change */
72295 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72296 +#else
72297 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72298 +#endif
72299 return 1;
72300 }
72301 return 0;
72302 diff --git a/mm/percpu.c b/mm/percpu.c
72303 index 716eb4a..8d10419 100644
72304 --- a/mm/percpu.c
72305 +++ b/mm/percpu.c
72306 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72307 static unsigned int pcpu_high_unit_cpu __read_mostly;
72308
72309 /* the address of the first chunk which starts with the kernel static area */
72310 -void *pcpu_base_addr __read_mostly;
72311 +void *pcpu_base_addr __read_only;
72312 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72313
72314 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72315 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72316 index e920aa3..137702a 100644
72317 --- a/mm/process_vm_access.c
72318 +++ b/mm/process_vm_access.c
72319 @@ -13,6 +13,7 @@
72320 #include <linux/uio.h>
72321 #include <linux/sched.h>
72322 #include <linux/highmem.h>
72323 +#include <linux/security.h>
72324 #include <linux/ptrace.h>
72325 #include <linux/slab.h>
72326 #include <linux/syscalls.h>
72327 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72328 size_t iov_l_curr_offset = 0;
72329 ssize_t iov_len;
72330
72331 + return -ENOSYS; // PaX: until properly audited
72332 +
72333 /*
72334 * Work out how many pages of struct pages we're going to need
72335 * when eventually calling get_user_pages
72336 */
72337 for (i = 0; i < riovcnt; i++) {
72338 iov_len = rvec[i].iov_len;
72339 - if (iov_len > 0) {
72340 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
72341 - + iov_len)
72342 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
72343 - / PAGE_SIZE + 1;
72344 - nr_pages = max(nr_pages, nr_pages_iov);
72345 - }
72346 + if (iov_len <= 0)
72347 + continue;
72348 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
72349 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
72350 + nr_pages = max(nr_pages, nr_pages_iov);
72351 }
72352
72353 if (nr_pages == 0)
72354 @@ -298,23 +299,23 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72355 goto free_proc_pages;
72356 }
72357
72358 - task_lock(task);
72359 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
72360 - task_unlock(task);
72361 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
72362 rc = -EPERM;
72363 goto put_task_struct;
72364 }
72365 - mm = task->mm;
72366
72367 - if (!mm || (task->flags & PF_KTHREAD)) {
72368 - task_unlock(task);
72369 - rc = -EINVAL;
72370 + mm = mm_access(task, PTRACE_MODE_ATTACH);
72371 + if (!mm || IS_ERR(mm)) {
72372 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
72373 + /*
72374 + * Explicitly map EACCES to EPERM as EPERM is a more a
72375 + * appropriate error code for process_vw_readv/writev
72376 + */
72377 + if (rc == -EACCES)
72378 + rc = -EPERM;
72379 goto put_task_struct;
72380 }
72381
72382 - atomic_inc(&mm->mm_users);
72383 - task_unlock(task);
72384 -
72385 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
72386 rc = process_vm_rw_single_vec(
72387 (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
72388 diff --git a/mm/rmap.c b/mm/rmap.c
72389 index a4fd368..e0ffec7 100644
72390 --- a/mm/rmap.c
72391 +++ b/mm/rmap.c
72392 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72393 struct anon_vma *anon_vma = vma->anon_vma;
72394 struct anon_vma_chain *avc;
72395
72396 +#ifdef CONFIG_PAX_SEGMEXEC
72397 + struct anon_vma_chain *avc_m = NULL;
72398 +#endif
72399 +
72400 might_sleep();
72401 if (unlikely(!anon_vma)) {
72402 struct mm_struct *mm = vma->vm_mm;
72403 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72404 if (!avc)
72405 goto out_enomem;
72406
72407 +#ifdef CONFIG_PAX_SEGMEXEC
72408 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72409 + if (!avc_m)
72410 + goto out_enomem_free_avc;
72411 +#endif
72412 +
72413 anon_vma = find_mergeable_anon_vma(vma);
72414 allocated = NULL;
72415 if (!anon_vma) {
72416 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72417 /* page_table_lock to protect against threads */
72418 spin_lock(&mm->page_table_lock);
72419 if (likely(!vma->anon_vma)) {
72420 +
72421 +#ifdef CONFIG_PAX_SEGMEXEC
72422 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72423 +
72424 + if (vma_m) {
72425 + BUG_ON(vma_m->anon_vma);
72426 + vma_m->anon_vma = anon_vma;
72427 + avc_m->anon_vma = anon_vma;
72428 + avc_m->vma = vma;
72429 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
72430 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
72431 + avc_m = NULL;
72432 + }
72433 +#endif
72434 +
72435 vma->anon_vma = anon_vma;
72436 avc->anon_vma = anon_vma;
72437 avc->vma = vma;
72438 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72439
72440 if (unlikely(allocated))
72441 put_anon_vma(allocated);
72442 +
72443 +#ifdef CONFIG_PAX_SEGMEXEC
72444 + if (unlikely(avc_m))
72445 + anon_vma_chain_free(avc_m);
72446 +#endif
72447 +
72448 if (unlikely(avc))
72449 anon_vma_chain_free(avc);
72450 }
72451 return 0;
72452
72453 out_enomem_free_avc:
72454 +
72455 +#ifdef CONFIG_PAX_SEGMEXEC
72456 + if (avc_m)
72457 + anon_vma_chain_free(avc_m);
72458 +#endif
72459 +
72460 anon_vma_chain_free(avc);
72461 out_enomem:
72462 return -ENOMEM;
72463 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
72464 * Attach the anon_vmas from src to dst.
72465 * Returns 0 on success, -ENOMEM on failure.
72466 */
72467 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72468 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72469 {
72470 struct anon_vma_chain *avc, *pavc;
72471 struct anon_vma *root = NULL;
72472 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72473 * the corresponding VMA in the parent process is attached to.
72474 * Returns 0 on success, non-zero on failure.
72475 */
72476 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72477 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72478 {
72479 struct anon_vma_chain *avc;
72480 struct anon_vma *anon_vma;
72481 diff --git a/mm/shmem.c b/mm/shmem.c
72482 index 6c253f7..367e20a 100644
72483 --- a/mm/shmem.c
72484 +++ b/mm/shmem.c
72485 @@ -31,7 +31,7 @@
72486 #include <linux/export.h>
72487 #include <linux/swap.h>
72488
72489 -static struct vfsmount *shm_mnt;
72490 +struct vfsmount *shm_mnt;
72491
72492 #ifdef CONFIG_SHMEM
72493 /*
72494 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72495 #define BOGO_DIRENT_SIZE 20
72496
72497 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72498 -#define SHORT_SYMLINK_LEN 128
72499 +#define SHORT_SYMLINK_LEN 64
72500
72501 struct shmem_xattr {
72502 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72503 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72504 int err = -ENOMEM;
72505
72506 /* Round up to L1_CACHE_BYTES to resist false sharing */
72507 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72508 - L1_CACHE_BYTES), GFP_KERNEL);
72509 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72510 if (!sbinfo)
72511 return -ENOMEM;
72512
72513 diff --git a/mm/slab.c b/mm/slab.c
72514 index 83311c9a..fcf8f86 100644
72515 --- a/mm/slab.c
72516 +++ b/mm/slab.c
72517 @@ -151,7 +151,7 @@
72518
72519 /* Legal flag mask for kmem_cache_create(). */
72520 #if DEBUG
72521 -# define CREATE_MASK (SLAB_RED_ZONE | \
72522 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72523 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72524 SLAB_CACHE_DMA | \
72525 SLAB_STORE_USER | \
72526 @@ -159,7 +159,7 @@
72527 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72528 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72529 #else
72530 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72531 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72532 SLAB_CACHE_DMA | \
72533 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72534 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72535 @@ -288,7 +288,7 @@ struct kmem_list3 {
72536 * Need this for bootstrapping a per node allocator.
72537 */
72538 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72539 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72540 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72541 #define CACHE_CACHE 0
72542 #define SIZE_AC MAX_NUMNODES
72543 #define SIZE_L3 (2 * MAX_NUMNODES)
72544 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72545 if ((x)->max_freeable < i) \
72546 (x)->max_freeable = i; \
72547 } while (0)
72548 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72549 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72550 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72551 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72552 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72553 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72554 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72555 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72556 #else
72557 #define STATS_INC_ACTIVE(x) do { } while (0)
72558 #define STATS_DEC_ACTIVE(x) do { } while (0)
72559 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72560 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72561 */
72562 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72563 - const struct slab *slab, void *obj)
72564 + const struct slab *slab, const void *obj)
72565 {
72566 u32 offset = (obj - slab->s_mem);
72567 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72568 @@ -564,7 +564,7 @@ struct cache_names {
72569 static struct cache_names __initdata cache_names[] = {
72570 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72571 #include <linux/kmalloc_sizes.h>
72572 - {NULL,}
72573 + {NULL}
72574 #undef CACHE
72575 };
72576
72577 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
72578 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72579 sizes[INDEX_AC].cs_size,
72580 ARCH_KMALLOC_MINALIGN,
72581 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72582 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72583 NULL);
72584
72585 if (INDEX_AC != INDEX_L3) {
72586 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
72587 kmem_cache_create(names[INDEX_L3].name,
72588 sizes[INDEX_L3].cs_size,
72589 ARCH_KMALLOC_MINALIGN,
72590 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72591 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72592 NULL);
72593 }
72594
72595 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
72596 sizes->cs_cachep = kmem_cache_create(names->name,
72597 sizes->cs_size,
72598 ARCH_KMALLOC_MINALIGN,
72599 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72600 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72601 NULL);
72602 }
72603 #ifdef CONFIG_ZONE_DMA
72604 @@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
72605 }
72606 /* cpu stats */
72607 {
72608 - unsigned long allochit = atomic_read(&cachep->allochit);
72609 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72610 - unsigned long freehit = atomic_read(&cachep->freehit);
72611 - unsigned long freemiss = atomic_read(&cachep->freemiss);
72612 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72613 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72614 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72615 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72616
72617 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72618 allochit, allocmiss, freehit, freemiss);
72619 @@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
72620 {
72621 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
72622 #ifdef CONFIG_DEBUG_SLAB_LEAK
72623 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72624 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
72625 #endif
72626 return 0;
72627 }
72628 module_init(slab_proc_init);
72629 #endif
72630
72631 +void check_object_size(const void *ptr, unsigned long n, bool to)
72632 +{
72633 +
72634 +#ifdef CONFIG_PAX_USERCOPY
72635 + struct page *page;
72636 + struct kmem_cache *cachep = NULL;
72637 + struct slab *slabp;
72638 + unsigned int objnr;
72639 + unsigned long offset;
72640 + const char *type;
72641 +
72642 + if (!n)
72643 + return;
72644 +
72645 + type = "<null>";
72646 + if (ZERO_OR_NULL_PTR(ptr))
72647 + goto report;
72648 +
72649 + if (!virt_addr_valid(ptr))
72650 + return;
72651 +
72652 + page = virt_to_head_page(ptr);
72653 +
72654 + type = "<process stack>";
72655 + if (!PageSlab(page)) {
72656 + if (object_is_on_stack(ptr, n) == -1)
72657 + goto report;
72658 + return;
72659 + }
72660 +
72661 + cachep = page_get_cache(page);
72662 + type = cachep->name;
72663 + if (!(cachep->flags & SLAB_USERCOPY))
72664 + goto report;
72665 +
72666 + slabp = page_get_slab(page);
72667 + objnr = obj_to_index(cachep, slabp, ptr);
72668 + BUG_ON(objnr >= cachep->num);
72669 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72670 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72671 + return;
72672 +
72673 +report:
72674 + pax_report_usercopy(ptr, n, to, type);
72675 +#endif
72676 +
72677 +}
72678 +EXPORT_SYMBOL(check_object_size);
72679 +
72680 /**
72681 * ksize - get the actual amount of memory allocated for a given object
72682 * @objp: Pointer to the object
72683 diff --git a/mm/slob.c b/mm/slob.c
72684 index 8105be4..e045f96 100644
72685 --- a/mm/slob.c
72686 +++ b/mm/slob.c
72687 @@ -29,7 +29,7 @@
72688 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72689 * alloc_pages() directly, allocating compound pages so the page order
72690 * does not have to be separately tracked, and also stores the exact
72691 - * allocation size in page->private so that it can be used to accurately
72692 + * allocation size in slob_page->size so that it can be used to accurately
72693 * provide ksize(). These objects are detected in kfree() because slob_page()
72694 * is false for them.
72695 *
72696 @@ -58,6 +58,7 @@
72697 */
72698
72699 #include <linux/kernel.h>
72700 +#include <linux/sched.h>
72701 #include <linux/slab.h>
72702 #include <linux/mm.h>
72703 #include <linux/swap.h> /* struct reclaim_state */
72704 @@ -102,7 +103,8 @@ struct slob_page {
72705 unsigned long flags; /* mandatory */
72706 atomic_t _count; /* mandatory */
72707 slobidx_t units; /* free units left in page */
72708 - unsigned long pad[2];
72709 + unsigned long pad[1];
72710 + unsigned long size; /* size when >=PAGE_SIZE */
72711 slob_t *free; /* first free slob_t in page */
72712 struct list_head list; /* linked list of free pages */
72713 };
72714 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72715 */
72716 static inline int is_slob_page(struct slob_page *sp)
72717 {
72718 - return PageSlab((struct page *)sp);
72719 + return PageSlab((struct page *)sp) && !sp->size;
72720 }
72721
72722 static inline void set_slob_page(struct slob_page *sp)
72723 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72724
72725 static inline struct slob_page *slob_page(const void *addr)
72726 {
72727 - return (struct slob_page *)virt_to_page(addr);
72728 + return (struct slob_page *)virt_to_head_page(addr);
72729 }
72730
72731 /*
72732 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
72733 /*
72734 * Return the size of a slob block.
72735 */
72736 -static slobidx_t slob_units(slob_t *s)
72737 +static slobidx_t slob_units(const slob_t *s)
72738 {
72739 if (s->units > 0)
72740 return s->units;
72741 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
72742 /*
72743 * Return the next free slob block pointer after this one.
72744 */
72745 -static slob_t *slob_next(slob_t *s)
72746 +static slob_t *slob_next(const slob_t *s)
72747 {
72748 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
72749 slobidx_t next;
72750 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
72751 /*
72752 * Returns true if s is the last free block in its page.
72753 */
72754 -static int slob_last(slob_t *s)
72755 +static int slob_last(const slob_t *s)
72756 {
72757 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
72758 }
72759 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
72760 if (!page)
72761 return NULL;
72762
72763 + set_slob_page(page);
72764 return page_address(page);
72765 }
72766
72767 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
72768 if (!b)
72769 return NULL;
72770 sp = slob_page(b);
72771 - set_slob_page(sp);
72772
72773 spin_lock_irqsave(&slob_lock, flags);
72774 sp->units = SLOB_UNITS(PAGE_SIZE);
72775 sp->free = b;
72776 + sp->size = 0;
72777 INIT_LIST_HEAD(&sp->list);
72778 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
72779 set_slob_page_free(sp, slob_list);
72780 @@ -476,10 +479,9 @@ out:
72781 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
72782 */
72783
72784 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72785 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
72786 {
72787 - unsigned int *m;
72788 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72789 + slob_t *m;
72790 void *ret;
72791
72792 gfp &= gfp_allowed_mask;
72793 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72794
72795 if (!m)
72796 return NULL;
72797 - *m = size;
72798 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
72799 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
72800 + m[0].units = size;
72801 + m[1].units = align;
72802 ret = (void *)m + align;
72803
72804 trace_kmalloc_node(_RET_IP_, ret,
72805 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72806 gfp |= __GFP_COMP;
72807 ret = slob_new_pages(gfp, order, node);
72808 if (ret) {
72809 - struct page *page;
72810 - page = virt_to_page(ret);
72811 - page->private = size;
72812 + struct slob_page *sp;
72813 + sp = slob_page(ret);
72814 + sp->size = size;
72815 }
72816
72817 trace_kmalloc_node(_RET_IP_, ret,
72818 size, PAGE_SIZE << order, gfp, node);
72819 }
72820
72821 - kmemleak_alloc(ret, size, 1, gfp);
72822 + return ret;
72823 +}
72824 +
72825 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
72826 +{
72827 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72828 + void *ret = __kmalloc_node_align(size, gfp, node, align);
72829 +
72830 + if (!ZERO_OR_NULL_PTR(ret))
72831 + kmemleak_alloc(ret, size, 1, gfp);
72832 return ret;
72833 }
72834 EXPORT_SYMBOL(__kmalloc_node);
72835 @@ -533,13 +547,92 @@ void kfree(const void *block)
72836 sp = slob_page(block);
72837 if (is_slob_page(sp)) {
72838 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72839 - unsigned int *m = (unsigned int *)(block - align);
72840 - slob_free(m, *m + align);
72841 - } else
72842 + slob_t *m = (slob_t *)(block - align);
72843 + slob_free(m, m[0].units + align);
72844 + } else {
72845 + clear_slob_page(sp);
72846 + free_slob_page(sp);
72847 + sp->size = 0;
72848 put_page(&sp->page);
72849 + }
72850 }
72851 EXPORT_SYMBOL(kfree);
72852
72853 +void check_object_size(const void *ptr, unsigned long n, bool to)
72854 +{
72855 +
72856 +#ifdef CONFIG_PAX_USERCOPY
72857 + struct slob_page *sp;
72858 + const slob_t *free;
72859 + const void *base;
72860 + unsigned long flags;
72861 + const char *type;
72862 +
72863 + if (!n)
72864 + return;
72865 +
72866 + type = "<null>";
72867 + if (ZERO_OR_NULL_PTR(ptr))
72868 + goto report;
72869 +
72870 + if (!virt_addr_valid(ptr))
72871 + return;
72872 +
72873 + type = "<process stack>";
72874 + sp = slob_page(ptr);
72875 + if (!PageSlab((struct page *)sp)) {
72876 + if (object_is_on_stack(ptr, n) == -1)
72877 + goto report;
72878 + return;
72879 + }
72880 +
72881 + type = "<slob>";
72882 + if (sp->size) {
72883 + base = page_address(&sp->page);
72884 + if (base <= ptr && n <= sp->size - (ptr - base))
72885 + return;
72886 + goto report;
72887 + }
72888 +
72889 + /* some tricky double walking to find the chunk */
72890 + spin_lock_irqsave(&slob_lock, flags);
72891 + base = (void *)((unsigned long)ptr & PAGE_MASK);
72892 + free = sp->free;
72893 +
72894 + while (!slob_last(free) && (void *)free <= ptr) {
72895 + base = free + slob_units(free);
72896 + free = slob_next(free);
72897 + }
72898 +
72899 + while (base < (void *)free) {
72900 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
72901 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
72902 + int offset;
72903 +
72904 + if (ptr < base + align)
72905 + break;
72906 +
72907 + offset = ptr - base - align;
72908 + if (offset >= m) {
72909 + base += size;
72910 + continue;
72911 + }
72912 +
72913 + if (n > m - offset)
72914 + break;
72915 +
72916 + spin_unlock_irqrestore(&slob_lock, flags);
72917 + return;
72918 + }
72919 +
72920 + spin_unlock_irqrestore(&slob_lock, flags);
72921 +report:
72922 + pax_report_usercopy(ptr, n, to, type);
72923 +#endif
72924 +
72925 +}
72926 +EXPORT_SYMBOL(check_object_size);
72927 +
72928 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
72929 size_t ksize(const void *block)
72930 {
72931 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
72932 sp = slob_page(block);
72933 if (is_slob_page(sp)) {
72934 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
72935 - unsigned int *m = (unsigned int *)(block - align);
72936 - return SLOB_UNITS(*m) * SLOB_UNIT;
72937 + slob_t *m = (slob_t *)(block - align);
72938 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
72939 } else
72940 - return sp->page.private;
72941 + return sp->size;
72942 }
72943 EXPORT_SYMBOL(ksize);
72944
72945 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
72946 {
72947 struct kmem_cache *c;
72948
72949 +#ifdef CONFIG_PAX_USERCOPY
72950 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
72951 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
72952 +#else
72953 c = slob_alloc(sizeof(struct kmem_cache),
72954 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
72955 +#endif
72956
72957 if (c) {
72958 c->name = name;
72959 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
72960
72961 lockdep_trace_alloc(flags);
72962
72963 +#ifdef CONFIG_PAX_USERCOPY
72964 + b = __kmalloc_node_align(c->size, flags, node, c->align);
72965 +#else
72966 if (c->size < PAGE_SIZE) {
72967 b = slob_alloc(c->size, flags, c->align, node);
72968 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72969 SLOB_UNITS(c->size) * SLOB_UNIT,
72970 flags, node);
72971 } else {
72972 + struct slob_page *sp;
72973 +
72974 b = slob_new_pages(flags, get_order(c->size), node);
72975 + sp = slob_page(b);
72976 + sp->size = c->size;
72977 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
72978 PAGE_SIZE << get_order(c->size),
72979 flags, node);
72980 }
72981 +#endif
72982
72983 if (c->ctor)
72984 c->ctor(b);
72985 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
72986
72987 static void __kmem_cache_free(void *b, int size)
72988 {
72989 - if (size < PAGE_SIZE)
72990 + struct slob_page *sp = slob_page(b);
72991 +
72992 + if (is_slob_page(sp))
72993 slob_free(b, size);
72994 - else
72995 + else {
72996 + clear_slob_page(sp);
72997 + free_slob_page(sp);
72998 + sp->size = 0;
72999 slob_free_pages(b, get_order(size));
73000 + }
73001 }
73002
73003 static void kmem_rcu_free(struct rcu_head *head)
73004 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73005
73006 void kmem_cache_free(struct kmem_cache *c, void *b)
73007 {
73008 + int size = c->size;
73009 +
73010 +#ifdef CONFIG_PAX_USERCOPY
73011 + if (size + c->align < PAGE_SIZE) {
73012 + size += c->align;
73013 + b -= c->align;
73014 + }
73015 +#endif
73016 +
73017 kmemleak_free_recursive(b, c->flags);
73018 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73019 struct slob_rcu *slob_rcu;
73020 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73021 - slob_rcu->size = c->size;
73022 + slob_rcu = b + (size - sizeof(struct slob_rcu));
73023 + slob_rcu->size = size;
73024 call_rcu(&slob_rcu->head, kmem_rcu_free);
73025 } else {
73026 - __kmem_cache_free(b, c->size);
73027 + __kmem_cache_free(b, size);
73028 }
73029
73030 +#ifdef CONFIG_PAX_USERCOPY
73031 + trace_kfree(_RET_IP_, b);
73032 +#else
73033 trace_kmem_cache_free(_RET_IP_, b);
73034 +#endif
73035 +
73036 }
73037 EXPORT_SYMBOL(kmem_cache_free);
73038
73039 diff --git a/mm/slub.c b/mm/slub.c
73040 index 1a919f0..1739c9b 100644
73041 --- a/mm/slub.c
73042 +++ b/mm/slub.c
73043 @@ -208,7 +208,7 @@ struct track {
73044
73045 enum track_item { TRACK_ALLOC, TRACK_FREE };
73046
73047 -#ifdef CONFIG_SYSFS
73048 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73049 static int sysfs_slab_add(struct kmem_cache *);
73050 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73051 static void sysfs_slab_remove(struct kmem_cache *);
73052 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
73053 if (!t->addr)
73054 return;
73055
73056 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73057 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73058 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73059 #ifdef CONFIG_STACKTRACE
73060 {
73061 @@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73062
73063 page = virt_to_head_page(x);
73064
73065 + BUG_ON(!PageSlab(page));
73066 +
73067 slab_free(s, page, x, _RET_IP_);
73068
73069 trace_kmem_cache_free(_RET_IP_, x);
73070 @@ -2592,7 +2594,7 @@ static int slub_min_objects;
73071 * Merge control. If this is set then no merging of slab caches will occur.
73072 * (Could be removed. This was introduced to pacify the merge skeptics.)
73073 */
73074 -static int slub_nomerge;
73075 +static int slub_nomerge = 1;
73076
73077 /*
73078 * Calculate the order of allocation given an slab object size.
73079 @@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73080 else
73081 s->cpu_partial = 30;
73082
73083 - s->refcount = 1;
73084 + atomic_set(&s->refcount, 1);
73085 #ifdef CONFIG_NUMA
73086 s->remote_node_defrag_ratio = 1000;
73087 #endif
73088 @@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73089 void kmem_cache_destroy(struct kmem_cache *s)
73090 {
73091 down_write(&slub_lock);
73092 - s->refcount--;
73093 - if (!s->refcount) {
73094 + if (atomic_dec_and_test(&s->refcount)) {
73095 list_del(&s->list);
73096 up_write(&slub_lock);
73097 if (kmem_cache_close(s)) {
73098 @@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73099 EXPORT_SYMBOL(__kmalloc_node);
73100 #endif
73101
73102 +void check_object_size(const void *ptr, unsigned long n, bool to)
73103 +{
73104 +
73105 +#ifdef CONFIG_PAX_USERCOPY
73106 + struct page *page;
73107 + struct kmem_cache *s = NULL;
73108 + unsigned long offset;
73109 + const char *type;
73110 +
73111 + if (!n)
73112 + return;
73113 +
73114 + type = "<null>";
73115 + if (ZERO_OR_NULL_PTR(ptr))
73116 + goto report;
73117 +
73118 + if (!virt_addr_valid(ptr))
73119 + return;
73120 +
73121 + page = virt_to_head_page(ptr);
73122 +
73123 + type = "<process stack>";
73124 + if (!PageSlab(page)) {
73125 + if (object_is_on_stack(ptr, n) == -1)
73126 + goto report;
73127 + return;
73128 + }
73129 +
73130 + s = page->slab;
73131 + type = s->name;
73132 + if (!(s->flags & SLAB_USERCOPY))
73133 + goto report;
73134 +
73135 + offset = (ptr - page_address(page)) % s->size;
73136 + if (offset <= s->objsize && n <= s->objsize - offset)
73137 + return;
73138 +
73139 +report:
73140 + pax_report_usercopy(ptr, n, to, type);
73141 +#endif
73142 +
73143 +}
73144 +EXPORT_SYMBOL(check_object_size);
73145 +
73146 size_t ksize(const void *object)
73147 {
73148 struct page *page;
73149 @@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73150 int node;
73151
73152 list_add(&s->list, &slab_caches);
73153 - s->refcount = -1;
73154 + atomic_set(&s->refcount, -1);
73155
73156 for_each_node_state(node, N_NORMAL_MEMORY) {
73157 struct kmem_cache_node *n = get_node(s, node);
73158 @@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
73159
73160 /* Caches that are not of the two-to-the-power-of size */
73161 if (KMALLOC_MIN_SIZE <= 32) {
73162 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73163 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73164 caches++;
73165 }
73166
73167 if (KMALLOC_MIN_SIZE <= 64) {
73168 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73169 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73170 caches++;
73171 }
73172
73173 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73174 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73175 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73176 caches++;
73177 }
73178
73179 @@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73180 /*
73181 * We may have set a slab to be unmergeable during bootstrap.
73182 */
73183 - if (s->refcount < 0)
73184 + if (atomic_read(&s->refcount) < 0)
73185 return 1;
73186
73187 return 0;
73188 @@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73189 down_write(&slub_lock);
73190 s = find_mergeable(size, align, flags, name, ctor);
73191 if (s) {
73192 - s->refcount++;
73193 + atomic_inc(&s->refcount);
73194 /*
73195 * Adjust the object sizes so that we clear
73196 * the complete object on kzalloc.
73197 @@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73198 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73199
73200 if (sysfs_slab_alias(s, name)) {
73201 - s->refcount--;
73202 + atomic_dec(&s->refcount);
73203 goto err;
73204 }
73205 up_write(&slub_lock);
73206 @@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73207 }
73208 #endif
73209
73210 -#ifdef CONFIG_SYSFS
73211 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73212 static int count_inuse(struct page *page)
73213 {
73214 return page->inuse;
73215 @@ -4410,12 +4455,12 @@ static void resiliency_test(void)
73216 validate_slab_cache(kmalloc_caches[9]);
73217 }
73218 #else
73219 -#ifdef CONFIG_SYSFS
73220 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73221 static void resiliency_test(void) {};
73222 #endif
73223 #endif
73224
73225 -#ifdef CONFIG_SYSFS
73226 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73227 enum slab_stat_type {
73228 SL_ALL, /* All slabs */
73229 SL_PARTIAL, /* Only partially allocated slabs */
73230 @@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
73231
73232 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73233 {
73234 - return sprintf(buf, "%d\n", s->refcount - 1);
73235 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73236 }
73237 SLAB_ATTR_RO(aliases);
73238
73239 @@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
73240 return name;
73241 }
73242
73243 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73244 static int sysfs_slab_add(struct kmem_cache *s)
73245 {
73246 int err;
73247 @@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73248 kobject_del(&s->kobj);
73249 kobject_put(&s->kobj);
73250 }
73251 +#endif
73252
73253 /*
73254 * Need to buffer aliases during bootup until sysfs becomes
73255 @@ -5298,6 +5345,7 @@ struct saved_alias {
73256
73257 static struct saved_alias *alias_list;
73258
73259 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73260 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73261 {
73262 struct saved_alias *al;
73263 @@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73264 alias_list = al;
73265 return 0;
73266 }
73267 +#endif
73268
73269 static int __init slab_sysfs_init(void)
73270 {
73271 diff --git a/mm/swap.c b/mm/swap.c
73272 index 55b266d..a532537 100644
73273 --- a/mm/swap.c
73274 +++ b/mm/swap.c
73275 @@ -31,6 +31,7 @@
73276 #include <linux/backing-dev.h>
73277 #include <linux/memcontrol.h>
73278 #include <linux/gfp.h>
73279 +#include <linux/hugetlb.h>
73280
73281 #include "internal.h"
73282
73283 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
73284
73285 __page_cache_release(page);
73286 dtor = get_compound_page_dtor(page);
73287 + if (!PageHuge(page))
73288 + BUG_ON(dtor != free_compound_page);
73289 (*dtor)(page);
73290 }
73291
73292 diff --git a/mm/swapfile.c b/mm/swapfile.c
73293 index b1cd120..aaae885 100644
73294 --- a/mm/swapfile.c
73295 +++ b/mm/swapfile.c
73296 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73297
73298 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73299 /* Activity counter to indicate that a swapon or swapoff has occurred */
73300 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73301 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73302
73303 static inline unsigned char swap_count(unsigned char ent)
73304 {
73305 @@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73306 }
73307 filp_close(swap_file, NULL);
73308 err = 0;
73309 - atomic_inc(&proc_poll_event);
73310 + atomic_inc_unchecked(&proc_poll_event);
73311 wake_up_interruptible(&proc_poll_wait);
73312
73313 out_dput:
73314 @@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73315
73316 poll_wait(file, &proc_poll_wait, wait);
73317
73318 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73319 - seq->poll_event = atomic_read(&proc_poll_event);
73320 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73321 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73322 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73323 }
73324
73325 @@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73326 return ret;
73327
73328 seq = file->private_data;
73329 - seq->poll_event = atomic_read(&proc_poll_event);
73330 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73331 return 0;
73332 }
73333
73334 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73335 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73336
73337 mutex_unlock(&swapon_mutex);
73338 - atomic_inc(&proc_poll_event);
73339 + atomic_inc_unchecked(&proc_poll_event);
73340 wake_up_interruptible(&proc_poll_wait);
73341
73342 if (S_ISREG(inode->i_mode))
73343 diff --git a/mm/util.c b/mm/util.c
73344 index 136ac4f..5117eef 100644
73345 --- a/mm/util.c
73346 +++ b/mm/util.c
73347 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
73348 * allocated buffer. Use this if you don't want to free the buffer immediately
73349 * like, for example, with RCU.
73350 */
73351 +#undef __krealloc
73352 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
73353 {
73354 void *ret;
73355 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
73356 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
73357 * %NULL pointer, the object pointed to is freed.
73358 */
73359 +#undef krealloc
73360 void *krealloc(const void *p, size_t new_size, gfp_t flags)
73361 {
73362 void *ret;
73363 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
73364 void arch_pick_mmap_layout(struct mm_struct *mm)
73365 {
73366 mm->mmap_base = TASK_UNMAPPED_BASE;
73367 +
73368 +#ifdef CONFIG_PAX_RANDMMAP
73369 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73370 + mm->mmap_base += mm->delta_mmap;
73371 +#endif
73372 +
73373 mm->get_unmapped_area = arch_get_unmapped_area;
73374 mm->unmap_area = arch_unmap_area;
73375 }
73376 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73377 index 27be2f0..0aef2c2 100644
73378 --- a/mm/vmalloc.c
73379 +++ b/mm/vmalloc.c
73380 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73381
73382 pte = pte_offset_kernel(pmd, addr);
73383 do {
73384 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73385 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73386 +
73387 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73388 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73389 + BUG_ON(!pte_exec(*pte));
73390 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73391 + continue;
73392 + }
73393 +#endif
73394 +
73395 + {
73396 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73397 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73398 + }
73399 } while (pte++, addr += PAGE_SIZE, addr != end);
73400 }
73401
73402 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73403 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73404 {
73405 pte_t *pte;
73406 + int ret = -ENOMEM;
73407
73408 /*
73409 * nr is a running index into the array which helps higher level
73410 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73411 pte = pte_alloc_kernel(pmd, addr);
73412 if (!pte)
73413 return -ENOMEM;
73414 +
73415 + pax_open_kernel();
73416 do {
73417 struct page *page = pages[*nr];
73418
73419 - if (WARN_ON(!pte_none(*pte)))
73420 - return -EBUSY;
73421 - if (WARN_ON(!page))
73422 - return -ENOMEM;
73423 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73424 + if (pgprot_val(prot) & _PAGE_NX)
73425 +#endif
73426 +
73427 + if (WARN_ON(!pte_none(*pte))) {
73428 + ret = -EBUSY;
73429 + goto out;
73430 + }
73431 + if (WARN_ON(!page)) {
73432 + ret = -ENOMEM;
73433 + goto out;
73434 + }
73435 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73436 (*nr)++;
73437 } while (pte++, addr += PAGE_SIZE, addr != end);
73438 - return 0;
73439 + ret = 0;
73440 +out:
73441 + pax_close_kernel();
73442 + return ret;
73443 }
73444
73445 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73446 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73447 * and fall back on vmalloc() if that fails. Others
73448 * just put it in the vmalloc space.
73449 */
73450 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73451 +#ifdef CONFIG_MODULES
73452 +#ifdef MODULES_VADDR
73453 unsigned long addr = (unsigned long)x;
73454 if (addr >= MODULES_VADDR && addr < MODULES_END)
73455 return 1;
73456 #endif
73457 +
73458 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73459 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73460 + return 1;
73461 +#endif
73462 +
73463 +#endif
73464 +
73465 return is_vmalloc_addr(x);
73466 }
73467
73468 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73469
73470 if (!pgd_none(*pgd)) {
73471 pud_t *pud = pud_offset(pgd, addr);
73472 +#ifdef CONFIG_X86
73473 + if (!pud_large(*pud))
73474 +#endif
73475 if (!pud_none(*pud)) {
73476 pmd_t *pmd = pmd_offset(pud, addr);
73477 +#ifdef CONFIG_X86
73478 + if (!pmd_large(*pmd))
73479 +#endif
73480 if (!pmd_none(*pmd)) {
73481 pte_t *ptep, pte;
73482
73483 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73484 struct vm_struct *area;
73485
73486 BUG_ON(in_interrupt());
73487 +
73488 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73489 + if (flags & VM_KERNEXEC) {
73490 + if (start != VMALLOC_START || end != VMALLOC_END)
73491 + return NULL;
73492 + start = (unsigned long)MODULES_EXEC_VADDR;
73493 + end = (unsigned long)MODULES_EXEC_END;
73494 + }
73495 +#endif
73496 +
73497 if (flags & VM_IOREMAP) {
73498 int bit = fls(size);
73499
73500 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
73501 if (count > totalram_pages)
73502 return NULL;
73503
73504 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73505 + if (!(pgprot_val(prot) & _PAGE_NX))
73506 + flags |= VM_KERNEXEC;
73507 +#endif
73508 +
73509 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73510 __builtin_return_address(0));
73511 if (!area)
73512 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73513 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73514 goto fail;
73515
73516 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73517 + if (!(pgprot_val(prot) & _PAGE_NX))
73518 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73519 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73520 + else
73521 +#endif
73522 +
73523 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73524 start, end, node, gfp_mask, caller);
73525 if (!area)
73526 @@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
73527 gfp_mask, prot, node, caller);
73528 }
73529
73530 +#undef __vmalloc
73531 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
73532 {
73533 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
73534 @@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
73535 * For tight control over page level allocator and protection flags
73536 * use __vmalloc() instead.
73537 */
73538 +#undef vmalloc
73539 void *vmalloc(unsigned long size)
73540 {
73541 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
73542 @@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
73543 * For tight control over page level allocator and protection flags
73544 * use __vmalloc() instead.
73545 */
73546 +#undef vzalloc
73547 void *vzalloc(unsigned long size)
73548 {
73549 return __vmalloc_node_flags(size, -1,
73550 @@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
73551 * The resulting memory area is zeroed so it can be mapped to userspace
73552 * without leaking data.
73553 */
73554 +#undef vmalloc_user
73555 void *vmalloc_user(unsigned long size)
73556 {
73557 struct vm_struct *area;
73558 @@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
73559 * For tight control over page level allocator and protection flags
73560 * use __vmalloc() instead.
73561 */
73562 +#undef vmalloc_node
73563 void *vmalloc_node(unsigned long size, int node)
73564 {
73565 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
73566 @@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
73567 * For tight control over page level allocator and protection flags
73568 * use __vmalloc_node() instead.
73569 */
73570 +#undef vzalloc_node
73571 void *vzalloc_node(unsigned long size, int node)
73572 {
73573 return __vmalloc_node_flags(size, node,
73574 @@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
73575 * For tight control over page level allocator and protection flags
73576 * use __vmalloc() instead.
73577 */
73578 -
73579 +#undef vmalloc_exec
73580 void *vmalloc_exec(unsigned long size)
73581 {
73582 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73583 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73584 -1, __builtin_return_address(0));
73585 }
73586
73587 @@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
73588 * Allocate enough 32bit PA addressable pages to cover @size from the
73589 * page level allocator and map them into contiguous kernel virtual space.
73590 */
73591 +#undef vmalloc_32
73592 void *vmalloc_32(unsigned long size)
73593 {
73594 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
73595 @@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
73596 * The resulting memory area is 32bit addressable and zeroed so it can be
73597 * mapped to userspace without leaking data.
73598 */
73599 +#undef vmalloc_32_user
73600 void *vmalloc_32_user(unsigned long size)
73601 {
73602 struct vm_struct *area;
73603 @@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73604 unsigned long uaddr = vma->vm_start;
73605 unsigned long usize = vma->vm_end - vma->vm_start;
73606
73607 + BUG_ON(vma->vm_mirror);
73608 +
73609 if ((PAGE_SIZE-1) & (unsigned long)addr)
73610 return -EINVAL;
73611
73612 diff --git a/mm/vmstat.c b/mm/vmstat.c
73613 index 8fd603b..cf0d930 100644
73614 --- a/mm/vmstat.c
73615 +++ b/mm/vmstat.c
73616 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73617 *
73618 * vm_stat contains the global counters
73619 */
73620 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73621 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
73622 EXPORT_SYMBOL(vm_stat);
73623
73624 #ifdef CONFIG_SMP
73625 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73626 v = p->vm_stat_diff[i];
73627 p->vm_stat_diff[i] = 0;
73628 local_irq_restore(flags);
73629 - atomic_long_add(v, &zone->vm_stat[i]);
73630 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73631 global_diff[i] += v;
73632 #ifdef CONFIG_NUMA
73633 /* 3 seconds idle till flush */
73634 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73635
73636 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73637 if (global_diff[i])
73638 - atomic_long_add(global_diff[i], &vm_stat[i]);
73639 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73640 }
73641
73642 #endif
73643 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
73644 start_cpu_timer(cpu);
73645 #endif
73646 #ifdef CONFIG_PROC_FS
73647 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73648 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73649 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73650 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73651 + {
73652 + mode_t gr_mode = S_IRUGO;
73653 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73654 + gr_mode = S_IRUSR;
73655 +#endif
73656 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73657 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73658 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73659 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73660 +#else
73661 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73662 +#endif
73663 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73664 + }
73665 #endif
73666 return 0;
73667 }
73668 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73669 index 5471628..cef8398 100644
73670 --- a/net/8021q/vlan.c
73671 +++ b/net/8021q/vlan.c
73672 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73673 err = -EPERM;
73674 if (!capable(CAP_NET_ADMIN))
73675 break;
73676 - if ((args.u.name_type >= 0) &&
73677 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73678 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73679 struct vlan_net *vn;
73680
73681 vn = net_generic(net, vlan_net_id);
73682 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73683 index fdfdb57..38d368c 100644
73684 --- a/net/9p/trans_fd.c
73685 +++ b/net/9p/trans_fd.c
73686 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73687 oldfs = get_fs();
73688 set_fs(get_ds());
73689 /* The cast to a user pointer is valid due to the set_fs() */
73690 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73691 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73692 set_fs(oldfs);
73693
73694 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73695 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
73696 index f41f026..fe76ea8 100644
73697 --- a/net/atm/atm_misc.c
73698 +++ b/net/atm/atm_misc.c
73699 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
73700 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
73701 return 1;
73702 atm_return(vcc, truesize);
73703 - atomic_inc(&vcc->stats->rx_drop);
73704 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73705 return 0;
73706 }
73707 EXPORT_SYMBOL(atm_charge);
73708 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
73709 }
73710 }
73711 atm_return(vcc, guess);
73712 - atomic_inc(&vcc->stats->rx_drop);
73713 + atomic_inc_unchecked(&vcc->stats->rx_drop);
73714 return NULL;
73715 }
73716 EXPORT_SYMBOL(atm_alloc_charge);
73717 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
73718
73719 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73720 {
73721 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73722 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73723 __SONET_ITEMS
73724 #undef __HANDLE_ITEM
73725 }
73726 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
73727
73728 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
73729 {
73730 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73731 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
73732 __SONET_ITEMS
73733 #undef __HANDLE_ITEM
73734 }
73735 diff --git a/net/atm/lec.h b/net/atm/lec.h
73736 index dfc0719..47c5322 100644
73737 --- a/net/atm/lec.h
73738 +++ b/net/atm/lec.h
73739 @@ -48,7 +48,7 @@ struct lane2_ops {
73740 const u8 *tlvs, u32 sizeoftlvs);
73741 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
73742 const u8 *tlvs, u32 sizeoftlvs);
73743 -};
73744 +} __no_const;
73745
73746 /*
73747 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
73748 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
73749 index 0919a88..a23d54e 100644
73750 --- a/net/atm/mpc.h
73751 +++ b/net/atm/mpc.h
73752 @@ -33,7 +33,7 @@ struct mpoa_client {
73753 struct mpc_parameters parameters; /* parameters for this client */
73754
73755 const struct net_device_ops *old_ops;
73756 - struct net_device_ops new_ops;
73757 + net_device_ops_no_const new_ops;
73758 };
73759
73760
73761 diff --git a/net/atm/proc.c b/net/atm/proc.c
73762 index 0d020de..011c7bb 100644
73763 --- a/net/atm/proc.c
73764 +++ b/net/atm/proc.c
73765 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
73766 const struct k_atm_aal_stats *stats)
73767 {
73768 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
73769 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
73770 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
73771 - atomic_read(&stats->rx_drop));
73772 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
73773 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
73774 + atomic_read_unchecked(&stats->rx_drop));
73775 }
73776
73777 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
73778 diff --git a/net/atm/resources.c b/net/atm/resources.c
73779 index 23f45ce..c748f1a 100644
73780 --- a/net/atm/resources.c
73781 +++ b/net/atm/resources.c
73782 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
73783 static void copy_aal_stats(struct k_atm_aal_stats *from,
73784 struct atm_aal_stats *to)
73785 {
73786 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
73787 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
73788 __AAL_STAT_ITEMS
73789 #undef __HANDLE_ITEM
73790 }
73791 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
73792 static void subtract_aal_stats(struct k_atm_aal_stats *from,
73793 struct atm_aal_stats *to)
73794 {
73795 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
73796 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
73797 __AAL_STAT_ITEMS
73798 #undef __HANDLE_ITEM
73799 }
73800 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
73801 index 3512e25..2b33401 100644
73802 --- a/net/batman-adv/bat_iv_ogm.c
73803 +++ b/net/batman-adv/bat_iv_ogm.c
73804 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73805
73806 /* change sequence number to network order */
73807 batman_ogm_packet->seqno =
73808 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
73809 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
73810
73811 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
73812 batman_ogm_packet->tt_crc = htons((uint16_t)
73813 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
73814 else
73815 batman_ogm_packet->gw_flags = NO_FLAGS;
73816
73817 - atomic_inc(&hard_iface->seqno);
73818 + atomic_inc_unchecked(&hard_iface->seqno);
73819
73820 slide_own_bcast_window(hard_iface);
73821 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
73822 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
73823 return;
73824
73825 /* could be changed by schedule_own_packet() */
73826 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
73827 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
73828
73829 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
73830
73831 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
73832 index 7704df4..beb4e16 100644
73833 --- a/net/batman-adv/hard-interface.c
73834 +++ b/net/batman-adv/hard-interface.c
73835 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
73836 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
73837 dev_add_pack(&hard_iface->batman_adv_ptype);
73838
73839 - atomic_set(&hard_iface->seqno, 1);
73840 - atomic_set(&hard_iface->frag_seqno, 1);
73841 + atomic_set_unchecked(&hard_iface->seqno, 1);
73842 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
73843 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
73844 hard_iface->net_dev->name);
73845
73846 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
73847 index f9cc957..efd9dae 100644
73848 --- a/net/batman-adv/soft-interface.c
73849 +++ b/net/batman-adv/soft-interface.c
73850 @@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
73851
73852 /* set broadcast sequence number */
73853 bcast_packet->seqno =
73854 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
73855 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
73856
73857 add_bcast_packet_to_list(bat_priv, skb, 1);
73858
73859 @@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
73860 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
73861
73862 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
73863 - atomic_set(&bat_priv->bcast_seqno, 1);
73864 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
73865 atomic_set(&bat_priv->ttvn, 0);
73866 atomic_set(&bat_priv->tt_local_changes, 0);
73867 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
73868 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
73869 index ab8d0fe..ceba3fd 100644
73870 --- a/net/batman-adv/types.h
73871 +++ b/net/batman-adv/types.h
73872 @@ -38,8 +38,8 @@ struct hard_iface {
73873 int16_t if_num;
73874 char if_status;
73875 struct net_device *net_dev;
73876 - atomic_t seqno;
73877 - atomic_t frag_seqno;
73878 + atomic_unchecked_t seqno;
73879 + atomic_unchecked_t frag_seqno;
73880 unsigned char *packet_buff;
73881 int packet_len;
73882 struct kobject *hardif_obj;
73883 @@ -154,7 +154,7 @@ struct bat_priv {
73884 atomic_t orig_interval; /* uint */
73885 atomic_t hop_penalty; /* uint */
73886 atomic_t log_level; /* uint */
73887 - atomic_t bcast_seqno;
73888 + atomic_unchecked_t bcast_seqno;
73889 atomic_t bcast_queue_left;
73890 atomic_t batman_queue_left;
73891 atomic_t ttvn; /* translation table version number */
73892 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
73893 index 07d1c1d..7e9bea9 100644
73894 --- a/net/batman-adv/unicast.c
73895 +++ b/net/batman-adv/unicast.c
73896 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
73897 frag1->flags = UNI_FRAG_HEAD | large_tail;
73898 frag2->flags = large_tail;
73899
73900 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
73901 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
73902 frag1->seqno = htons(seqno - 1);
73903 frag2->seqno = htons(seqno);
73904
73905 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
73906 index c1c597e..05ebb40 100644
73907 --- a/net/bluetooth/hci_conn.c
73908 +++ b/net/bluetooth/hci_conn.c
73909 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
73910 memset(&cp, 0, sizeof(cp));
73911
73912 cp.handle = cpu_to_le16(conn->handle);
73913 - memcpy(cp.ltk, ltk, sizeof(ltk));
73914 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
73915
73916 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
73917 }
73918 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
73919 index 17b5b1c..826d872 100644
73920 --- a/net/bluetooth/l2cap_core.c
73921 +++ b/net/bluetooth/l2cap_core.c
73922 @@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
73923 break;
73924
73925 case L2CAP_CONF_RFC:
73926 - if (olen == sizeof(rfc))
73927 - memcpy(&rfc, (void *)val, olen);
73928 + if (olen != sizeof(rfc))
73929 + break;
73930 +
73931 + memcpy(&rfc, (void *)val, olen);
73932
73933 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
73934 rfc.mode != chan->mode)
73935 @@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
73936
73937 switch (type) {
73938 case L2CAP_CONF_RFC:
73939 - if (olen == sizeof(rfc))
73940 - memcpy(&rfc, (void *)val, olen);
73941 + if (olen != sizeof(rfc))
73942 + break;
73943 +
73944 + memcpy(&rfc, (void *)val, olen);
73945 goto done;
73946 }
73947 }
73948 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
73949 index a5f4e57..910ee6d 100644
73950 --- a/net/bridge/br_multicast.c
73951 +++ b/net/bridge/br_multicast.c
73952 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
73953 nexthdr = ip6h->nexthdr;
73954 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
73955
73956 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
73957 + if (nexthdr != IPPROTO_ICMPV6)
73958 return 0;
73959
73960 /* Okay, we found ICMPv6 header */
73961 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
73962 index 5864cc4..121f3a30 100644
73963 --- a/net/bridge/netfilter/ebtables.c
73964 +++ b/net/bridge/netfilter/ebtables.c
73965 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
73966 tmp.valid_hooks = t->table->valid_hooks;
73967 }
73968 mutex_unlock(&ebt_mutex);
73969 - if (copy_to_user(user, &tmp, *len) != 0){
73970 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
73971 BUGPRINT("c2u Didn't work\n");
73972 ret = -EFAULT;
73973 break;
73974 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
73975 index a986280..13444a1 100644
73976 --- a/net/caif/caif_socket.c
73977 +++ b/net/caif/caif_socket.c
73978 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
73979 #ifdef CONFIG_DEBUG_FS
73980 struct debug_fs_counter {
73981 atomic_t caif_nr_socks;
73982 - atomic_t caif_sock_create;
73983 - atomic_t num_connect_req;
73984 - atomic_t num_connect_resp;
73985 - atomic_t num_connect_fail_resp;
73986 - atomic_t num_disconnect;
73987 - atomic_t num_remote_shutdown_ind;
73988 - atomic_t num_tx_flow_off_ind;
73989 - atomic_t num_tx_flow_on_ind;
73990 - atomic_t num_rx_flow_off;
73991 - atomic_t num_rx_flow_on;
73992 + atomic_unchecked_t caif_sock_create;
73993 + atomic_unchecked_t num_connect_req;
73994 + atomic_unchecked_t num_connect_resp;
73995 + atomic_unchecked_t num_connect_fail_resp;
73996 + atomic_unchecked_t num_disconnect;
73997 + atomic_unchecked_t num_remote_shutdown_ind;
73998 + atomic_unchecked_t num_tx_flow_off_ind;
73999 + atomic_unchecked_t num_tx_flow_on_ind;
74000 + atomic_unchecked_t num_rx_flow_off;
74001 + atomic_unchecked_t num_rx_flow_on;
74002 };
74003 static struct debug_fs_counter cnt;
74004 #define dbfs_atomic_inc(v) atomic_inc_return(v)
74005 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
74006 #define dbfs_atomic_dec(v) atomic_dec_return(v)
74007 #else
74008 #define dbfs_atomic_inc(v) 0
74009 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74010 atomic_read(&cf_sk->sk.sk_rmem_alloc),
74011 sk_rcvbuf_lowwater(cf_sk));
74012 set_rx_flow_off(cf_sk);
74013 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74014 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74015 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74016 }
74017
74018 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74019 set_rx_flow_off(cf_sk);
74020 if (net_ratelimit())
74021 pr_debug("sending flow OFF due to rmem_schedule\n");
74022 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74023 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74024 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74025 }
74026 skb->dev = NULL;
74027 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
74028 switch (flow) {
74029 case CAIF_CTRLCMD_FLOW_ON_IND:
74030 /* OK from modem to start sending again */
74031 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
74032 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
74033 set_tx_flow_on(cf_sk);
74034 cf_sk->sk.sk_state_change(&cf_sk->sk);
74035 break;
74036
74037 case CAIF_CTRLCMD_FLOW_OFF_IND:
74038 /* Modem asks us to shut up */
74039 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
74040 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
74041 set_tx_flow_off(cf_sk);
74042 cf_sk->sk.sk_state_change(&cf_sk->sk);
74043 break;
74044 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74045 /* We're now connected */
74046 caif_client_register_refcnt(&cf_sk->layer,
74047 cfsk_hold, cfsk_put);
74048 - dbfs_atomic_inc(&cnt.num_connect_resp);
74049 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
74050 cf_sk->sk.sk_state = CAIF_CONNECTED;
74051 set_tx_flow_on(cf_sk);
74052 cf_sk->sk.sk_state_change(&cf_sk->sk);
74053 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74054
74055 case CAIF_CTRLCMD_INIT_FAIL_RSP:
74056 /* Connect request failed */
74057 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
74058 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
74059 cf_sk->sk.sk_err = ECONNREFUSED;
74060 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
74061 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74062 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74063
74064 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
74065 /* Modem has closed this connection, or device is down. */
74066 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
74067 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
74068 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74069 cf_sk->sk.sk_err = ECONNRESET;
74070 set_rx_flow_on(cf_sk);
74071 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
74072 return;
74073
74074 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
74075 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
74076 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
74077 set_rx_flow_on(cf_sk);
74078 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
74079 }
74080 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
74081 /*ifindex = id of the interface.*/
74082 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
74083
74084 - dbfs_atomic_inc(&cnt.num_connect_req);
74085 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
74086 cf_sk->layer.receive = caif_sktrecv_cb;
74087
74088 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
74089 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
74090 spin_unlock_bh(&sk->sk_receive_queue.lock);
74091 sock->sk = NULL;
74092
74093 - dbfs_atomic_inc(&cnt.num_disconnect);
74094 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
74095
74096 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
74097 if (cf_sk->debugfs_socket_dir != NULL)
74098 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
74099 cf_sk->conn_req.protocol = protocol;
74100 /* Increase the number of sockets created. */
74101 dbfs_atomic_inc(&cnt.caif_nr_socks);
74102 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
74103 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
74104 #ifdef CONFIG_DEBUG_FS
74105 if (!IS_ERR(debugfsdir)) {
74106
74107 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74108 index 5cf5222..6f704ad 100644
74109 --- a/net/caif/cfctrl.c
74110 +++ b/net/caif/cfctrl.c
74111 @@ -9,6 +9,7 @@
74112 #include <linux/stddef.h>
74113 #include <linux/spinlock.h>
74114 #include <linux/slab.h>
74115 +#include <linux/sched.h>
74116 #include <net/caif/caif_layer.h>
74117 #include <net/caif/cfpkt.h>
74118 #include <net/caif/cfctrl.h>
74119 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74120 memset(&dev_info, 0, sizeof(dev_info));
74121 dev_info.id = 0xff;
74122 cfsrvl_init(&this->serv, 0, &dev_info, false);
74123 - atomic_set(&this->req_seq_no, 1);
74124 - atomic_set(&this->rsp_seq_no, 1);
74125 + atomic_set_unchecked(&this->req_seq_no, 1);
74126 + atomic_set_unchecked(&this->rsp_seq_no, 1);
74127 this->serv.layer.receive = cfctrl_recv;
74128 sprintf(this->serv.layer.name, "ctrl");
74129 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74130 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74131 struct cfctrl_request_info *req)
74132 {
74133 spin_lock_bh(&ctrl->info_list_lock);
74134 - atomic_inc(&ctrl->req_seq_no);
74135 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74136 + atomic_inc_unchecked(&ctrl->req_seq_no);
74137 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74138 list_add_tail(&req->list, &ctrl->list);
74139 spin_unlock_bh(&ctrl->info_list_lock);
74140 }
74141 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74142 if (p != first)
74143 pr_warn("Requests are not received in order\n");
74144
74145 - atomic_set(&ctrl->rsp_seq_no,
74146 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74147 p->sequence_no);
74148 list_del(&p->list);
74149 goto out;
74150 diff --git a/net/can/gw.c b/net/can/gw.c
74151 index 3d79b12..8de85fa 100644
74152 --- a/net/can/gw.c
74153 +++ b/net/can/gw.c
74154 @@ -96,7 +96,7 @@ struct cf_mod {
74155 struct {
74156 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74157 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74158 - } csumfunc;
74159 + } __no_const csumfunc;
74160 };
74161
74162
74163 diff --git a/net/compat.c b/net/compat.c
74164 index 6def90e..c6992fa 100644
74165 --- a/net/compat.c
74166 +++ b/net/compat.c
74167 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74168 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74169 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74170 return -EFAULT;
74171 - kmsg->msg_name = compat_ptr(tmp1);
74172 - kmsg->msg_iov = compat_ptr(tmp2);
74173 - kmsg->msg_control = compat_ptr(tmp3);
74174 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74175 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74176 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74177 return 0;
74178 }
74179
74180 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74181
74182 if (kern_msg->msg_namelen) {
74183 if (mode == VERIFY_READ) {
74184 - int err = move_addr_to_kernel(kern_msg->msg_name,
74185 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74186 kern_msg->msg_namelen,
74187 kern_address);
74188 if (err < 0)
74189 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74190 kern_msg->msg_name = NULL;
74191
74192 tot_len = iov_from_user_compat_to_kern(kern_iov,
74193 - (struct compat_iovec __user *)kern_msg->msg_iov,
74194 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
74195 kern_msg->msg_iovlen);
74196 if (tot_len >= 0)
74197 kern_msg->msg_iov = kern_iov;
74198 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74199
74200 #define CMSG_COMPAT_FIRSTHDR(msg) \
74201 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74202 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74203 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74204 (struct compat_cmsghdr __user *)NULL)
74205
74206 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74207 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74208 (ucmlen) <= (unsigned long) \
74209 ((mhdr)->msg_controllen - \
74210 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74211 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74212
74213 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74214 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74215 {
74216 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74217 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74218 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74219 msg->msg_controllen)
74220 return NULL;
74221 return (struct compat_cmsghdr __user *)ptr;
74222 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74223 {
74224 struct compat_timeval ctv;
74225 struct compat_timespec cts[3];
74226 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74227 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74228 struct compat_cmsghdr cmhdr;
74229 int cmlen;
74230
74231 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74232
74233 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74234 {
74235 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74236 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74237 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74238 int fdnum = scm->fp->count;
74239 struct file **fp = scm->fp->fp;
74240 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74241 return -EFAULT;
74242 old_fs = get_fs();
74243 set_fs(KERNEL_DS);
74244 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74245 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74246 set_fs(old_fs);
74247
74248 return err;
74249 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74250 len = sizeof(ktime);
74251 old_fs = get_fs();
74252 set_fs(KERNEL_DS);
74253 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74254 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74255 set_fs(old_fs);
74256
74257 if (!err) {
74258 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74259 case MCAST_JOIN_GROUP:
74260 case MCAST_LEAVE_GROUP:
74261 {
74262 - struct compat_group_req __user *gr32 = (void *)optval;
74263 + struct compat_group_req __user *gr32 = (void __user *)optval;
74264 struct group_req __user *kgr =
74265 compat_alloc_user_space(sizeof(struct group_req));
74266 u32 interface;
74267 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74268 case MCAST_BLOCK_SOURCE:
74269 case MCAST_UNBLOCK_SOURCE:
74270 {
74271 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74272 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74273 struct group_source_req __user *kgsr = compat_alloc_user_space(
74274 sizeof(struct group_source_req));
74275 u32 interface;
74276 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74277 }
74278 case MCAST_MSFILTER:
74279 {
74280 - struct compat_group_filter __user *gf32 = (void *)optval;
74281 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74282 struct group_filter __user *kgf;
74283 u32 interface, fmode, numsrc;
74284
74285 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74286 char __user *optval, int __user *optlen,
74287 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74288 {
74289 - struct compat_group_filter __user *gf32 = (void *)optval;
74290 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74291 struct group_filter __user *kgf;
74292 int __user *koptlen;
74293 u32 interface, fmode, numsrc;
74294 diff --git a/net/core/datagram.c b/net/core/datagram.c
74295 index 68bbf9f..5ef0d12 100644
74296 --- a/net/core/datagram.c
74297 +++ b/net/core/datagram.c
74298 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74299 }
74300
74301 kfree_skb(skb);
74302 - atomic_inc(&sk->sk_drops);
74303 + atomic_inc_unchecked(&sk->sk_drops);
74304 sk_mem_reclaim_partial(sk);
74305
74306 return err;
74307 diff --git a/net/core/dev.c b/net/core/dev.c
74308 index c56cacf..b28e35f 100644
74309 --- a/net/core/dev.c
74310 +++ b/net/core/dev.c
74311 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
74312 if (no_module && capable(CAP_NET_ADMIN))
74313 no_module = request_module("netdev-%s", name);
74314 if (no_module && capable(CAP_SYS_MODULE)) {
74315 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74316 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74317 +#else
74318 if (!request_module("%s", name))
74319 pr_err("Loading kernel module for a network device "
74320 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
74321 "instead\n", name);
74322 +#endif
74323 }
74324 }
74325 EXPORT_SYMBOL(dev_load);
74326 @@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74327 {
74328 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74329 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74330 - atomic_long_inc(&dev->rx_dropped);
74331 + atomic_long_inc_unchecked(&dev->rx_dropped);
74332 kfree_skb(skb);
74333 return NET_RX_DROP;
74334 }
74335 @@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74336 nf_reset(skb);
74337
74338 if (unlikely(!is_skb_forwardable(dev, skb))) {
74339 - atomic_long_inc(&dev->rx_dropped);
74340 + atomic_long_inc_unchecked(&dev->rx_dropped);
74341 kfree_skb(skb);
74342 return NET_RX_DROP;
74343 }
74344 @@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74345
74346 struct dev_gso_cb {
74347 void (*destructor)(struct sk_buff *skb);
74348 -};
74349 +} __no_const;
74350
74351 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74352
74353 @@ -2970,7 +2974,7 @@ enqueue:
74354
74355 local_irq_restore(flags);
74356
74357 - atomic_long_inc(&skb->dev->rx_dropped);
74358 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74359 kfree_skb(skb);
74360 return NET_RX_DROP;
74361 }
74362 @@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
74363 }
74364 EXPORT_SYMBOL(netif_rx_ni);
74365
74366 -static void net_tx_action(struct softirq_action *h)
74367 +static void net_tx_action(void)
74368 {
74369 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74370
74371 @@ -3333,7 +3337,7 @@ ncls:
74372 if (pt_prev) {
74373 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74374 } else {
74375 - atomic_long_inc(&skb->dev->rx_dropped);
74376 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74377 kfree_skb(skb);
74378 /* Jamal, now you will not able to escape explaining
74379 * me how you were going to use this. :-)
74380 @@ -3897,7 +3901,7 @@ void netif_napi_del(struct napi_struct *napi)
74381 }
74382 EXPORT_SYMBOL(netif_napi_del);
74383
74384 -static void net_rx_action(struct softirq_action *h)
74385 +static void net_rx_action(void)
74386 {
74387 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74388 unsigned long time_limit = jiffies + 2;
74389 @@ -5955,7 +5959,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74390 } else {
74391 netdev_stats_to_stats64(storage, &dev->stats);
74392 }
74393 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74394 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74395 return storage;
74396 }
74397 EXPORT_SYMBOL(dev_get_stats);
74398 diff --git a/net/core/flow.c b/net/core/flow.c
74399 index e318c7e..168b1d0 100644
74400 --- a/net/core/flow.c
74401 +++ b/net/core/flow.c
74402 @@ -61,7 +61,7 @@ struct flow_cache {
74403 struct timer_list rnd_timer;
74404 };
74405
74406 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
74407 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74408 EXPORT_SYMBOL(flow_cache_genid);
74409 static struct flow_cache flow_cache_global;
74410 static struct kmem_cache *flow_cachep __read_mostly;
74411 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74412
74413 static int flow_entry_valid(struct flow_cache_entry *fle)
74414 {
74415 - if (atomic_read(&flow_cache_genid) != fle->genid)
74416 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74417 return 0;
74418 if (fle->object && !fle->object->ops->check(fle->object))
74419 return 0;
74420 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74421 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74422 fcp->hash_count++;
74423 }
74424 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74425 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74426 flo = fle->object;
74427 if (!flo)
74428 goto ret_object;
74429 @@ -280,7 +280,7 @@ nocache:
74430 }
74431 flo = resolver(net, key, family, dir, flo, ctx);
74432 if (fle) {
74433 - fle->genid = atomic_read(&flow_cache_genid);
74434 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
74435 if (!IS_ERR(flo))
74436 fle->object = flo;
74437 else
74438 diff --git a/net/core/iovec.c b/net/core/iovec.c
74439 index c40f27e..7f49254 100644
74440 --- a/net/core/iovec.c
74441 +++ b/net/core/iovec.c
74442 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
74443 if (m->msg_namelen) {
74444 if (mode == VERIFY_READ) {
74445 void __user *namep;
74446 - namep = (void __user __force *) m->msg_name;
74447 + namep = (void __force_user *) m->msg_name;
74448 err = move_addr_to_kernel(namep, m->msg_namelen,
74449 address);
74450 if (err < 0)
74451 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
74452 }
74453
74454 size = m->msg_iovlen * sizeof(struct iovec);
74455 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74456 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74457 return -EFAULT;
74458
74459 m->msg_iov = iov;
74460 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74461 index 9083e82..1673203 100644
74462 --- a/net/core/rtnetlink.c
74463 +++ b/net/core/rtnetlink.c
74464 @@ -57,7 +57,7 @@ struct rtnl_link {
74465 rtnl_doit_func doit;
74466 rtnl_dumpit_func dumpit;
74467 rtnl_calcit_func calcit;
74468 -};
74469 +} __no_const;
74470
74471 static DEFINE_MUTEX(rtnl_mutex);
74472 static u16 min_ifinfo_dump_size;
74473 diff --git a/net/core/scm.c b/net/core/scm.c
74474 index ff52ad0..aff1c0f 100644
74475 --- a/net/core/scm.c
74476 +++ b/net/core/scm.c
74477 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
74478 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74479 {
74480 struct cmsghdr __user *cm
74481 - = (__force struct cmsghdr __user *)msg->msg_control;
74482 + = (struct cmsghdr __force_user *)msg->msg_control;
74483 struct cmsghdr cmhdr;
74484 int cmlen = CMSG_LEN(len);
74485 int err;
74486 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74487 err = -EFAULT;
74488 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74489 goto out;
74490 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74491 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74492 goto out;
74493 cmlen = CMSG_SPACE(len);
74494 if (msg->msg_controllen < cmlen)
74495 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
74496 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74497 {
74498 struct cmsghdr __user *cm
74499 - = (__force struct cmsghdr __user*)msg->msg_control;
74500 + = (struct cmsghdr __force_user *)msg->msg_control;
74501
74502 int fdmax = 0;
74503 int fdnum = scm->fp->count;
74504 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74505 if (fdnum < fdmax)
74506 fdmax = fdnum;
74507
74508 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74509 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74510 i++, cmfptr++)
74511 {
74512 int new_fd;
74513 diff --git a/net/core/sock.c b/net/core/sock.c
74514 index b23f174..b9a0d26 100644
74515 --- a/net/core/sock.c
74516 +++ b/net/core/sock.c
74517 @@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74518 struct sk_buff_head *list = &sk->sk_receive_queue;
74519
74520 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
74521 - atomic_inc(&sk->sk_drops);
74522 + atomic_inc_unchecked(&sk->sk_drops);
74523 trace_sock_rcvqueue_full(sk, skb);
74524 return -ENOMEM;
74525 }
74526 @@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74527 return err;
74528
74529 if (!sk_rmem_schedule(sk, skb->truesize)) {
74530 - atomic_inc(&sk->sk_drops);
74531 + atomic_inc_unchecked(&sk->sk_drops);
74532 return -ENOBUFS;
74533 }
74534
74535 @@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74536 skb_dst_force(skb);
74537
74538 spin_lock_irqsave(&list->lock, flags);
74539 - skb->dropcount = atomic_read(&sk->sk_drops);
74540 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74541 __skb_queue_tail(list, skb);
74542 spin_unlock_irqrestore(&list->lock, flags);
74543
74544 @@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74545 skb->dev = NULL;
74546
74547 if (sk_rcvqueues_full(sk, skb)) {
74548 - atomic_inc(&sk->sk_drops);
74549 + atomic_inc_unchecked(&sk->sk_drops);
74550 goto discard_and_relse;
74551 }
74552 if (nested)
74553 @@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74554 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74555 } else if (sk_add_backlog(sk, skb)) {
74556 bh_unlock_sock(sk);
74557 - atomic_inc(&sk->sk_drops);
74558 + atomic_inc_unchecked(&sk->sk_drops);
74559 goto discard_and_relse;
74560 }
74561
74562 @@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74563 if (len > sizeof(peercred))
74564 len = sizeof(peercred);
74565 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74566 - if (copy_to_user(optval, &peercred, len))
74567 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74568 return -EFAULT;
74569 goto lenout;
74570 }
74571 @@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74572 return -ENOTCONN;
74573 if (lv < len)
74574 return -EINVAL;
74575 - if (copy_to_user(optval, address, len))
74576 + if (len > sizeof(address) || copy_to_user(optval, address, len))
74577 return -EFAULT;
74578 goto lenout;
74579 }
74580 @@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74581
74582 if (len > lv)
74583 len = lv;
74584 - if (copy_to_user(optval, &v, len))
74585 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
74586 return -EFAULT;
74587 lenout:
74588 if (put_user(len, optlen))
74589 @@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74590 */
74591 smp_wmb();
74592 atomic_set(&sk->sk_refcnt, 1);
74593 - atomic_set(&sk->sk_drops, 0);
74594 + atomic_set_unchecked(&sk->sk_drops, 0);
74595 }
74596 EXPORT_SYMBOL(sock_init_data);
74597
74598 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74599 index 02e75d1..9a57a7c 100644
74600 --- a/net/decnet/sysctl_net_decnet.c
74601 +++ b/net/decnet/sysctl_net_decnet.c
74602 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74603
74604 if (len > *lenp) len = *lenp;
74605
74606 - if (copy_to_user(buffer, addr, len))
74607 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
74608 return -EFAULT;
74609
74610 *lenp = len;
74611 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74612
74613 if (len > *lenp) len = *lenp;
74614
74615 - if (copy_to_user(buffer, devname, len))
74616 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
74617 return -EFAULT;
74618
74619 *lenp = len;
74620 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74621 index 39a2d29..f39c0fe 100644
74622 --- a/net/econet/Kconfig
74623 +++ b/net/econet/Kconfig
74624 @@ -4,7 +4,7 @@
74625
74626 config ECONET
74627 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74628 - depends on EXPERIMENTAL && INET
74629 + depends on EXPERIMENTAL && INET && BROKEN
74630 ---help---
74631 Econet is a fairly old and slow networking protocol mainly used by
74632 Acorn computers to access file and print servers. It uses native
74633 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74634 index 92fc5f6..b790d91 100644
74635 --- a/net/ipv4/fib_frontend.c
74636 +++ b/net/ipv4/fib_frontend.c
74637 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74638 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74639 fib_sync_up(dev);
74640 #endif
74641 - atomic_inc(&net->ipv4.dev_addr_genid);
74642 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74643 rt_cache_flush(dev_net(dev), -1);
74644 break;
74645 case NETDEV_DOWN:
74646 fib_del_ifaddr(ifa, NULL);
74647 - atomic_inc(&net->ipv4.dev_addr_genid);
74648 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74649 if (ifa->ifa_dev->ifa_list == NULL) {
74650 /* Last address was deleted from this interface.
74651 * Disable IP.
74652 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74653 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74654 fib_sync_up(dev);
74655 #endif
74656 - atomic_inc(&net->ipv4.dev_addr_genid);
74657 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74658 rt_cache_flush(dev_net(dev), -1);
74659 break;
74660 case NETDEV_DOWN:
74661 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74662 index 80106d8..232e898 100644
74663 --- a/net/ipv4/fib_semantics.c
74664 +++ b/net/ipv4/fib_semantics.c
74665 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74666 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74667 nh->nh_gw,
74668 nh->nh_parent->fib_scope);
74669 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74670 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74671
74672 return nh->nh_saddr;
74673 }
74674 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
74675 index ccee270..db23c3c 100644
74676 --- a/net/ipv4/inet_diag.c
74677 +++ b/net/ipv4/inet_diag.c
74678 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
74679 r->idiag_retrans = 0;
74680
74681 r->id.idiag_if = sk->sk_bound_dev_if;
74682 +
74683 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74684 + r->id.idiag_cookie[0] = 0;
74685 + r->id.idiag_cookie[1] = 0;
74686 +#else
74687 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
74688 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74689 +#endif
74690
74691 r->id.idiag_sport = inet->inet_sport;
74692 r->id.idiag_dport = inet->inet_dport;
74693 @@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
74694 r->idiag_family = tw->tw_family;
74695 r->idiag_retrans = 0;
74696 r->id.idiag_if = tw->tw_bound_dev_if;
74697 +
74698 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74699 + r->id.idiag_cookie[0] = 0;
74700 + r->id.idiag_cookie[1] = 0;
74701 +#else
74702 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
74703 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
74704 +#endif
74705 +
74706 r->id.idiag_sport = tw->tw_sport;
74707 r->id.idiag_dport = tw->tw_dport;
74708 r->id.idiag_src[0] = tw->tw_rcv_saddr;
74709 @@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
74710 if (sk == NULL)
74711 goto unlock;
74712
74713 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74714 err = -ESTALE;
74715 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
74716 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
74717 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
74718 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
74719 goto out;
74720 +#endif
74721
74722 err = -ENOMEM;
74723 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
74724 @@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
74725 r->idiag_retrans = req->retrans;
74726
74727 r->id.idiag_if = sk->sk_bound_dev_if;
74728 +
74729 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74730 + r->id.idiag_cookie[0] = 0;
74731 + r->id.idiag_cookie[1] = 0;
74732 +#else
74733 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
74734 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
74735 +#endif
74736
74737 tmo = req->expires - jiffies;
74738 if (tmo < 0)
74739 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
74740 index 984ec65..97ac518 100644
74741 --- a/net/ipv4/inet_hashtables.c
74742 +++ b/net/ipv4/inet_hashtables.c
74743 @@ -18,12 +18,15 @@
74744 #include <linux/sched.h>
74745 #include <linux/slab.h>
74746 #include <linux/wait.h>
74747 +#include <linux/security.h>
74748
74749 #include <net/inet_connection_sock.h>
74750 #include <net/inet_hashtables.h>
74751 #include <net/secure_seq.h>
74752 #include <net/ip.h>
74753
74754 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
74755 +
74756 /*
74757 * Allocate and initialize a new local port bind bucket.
74758 * The bindhash mutex for snum's hash chain must be held here.
74759 @@ -530,6 +533,8 @@ ok:
74760 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
74761 spin_unlock(&head->lock);
74762
74763 + gr_update_task_in_ip_table(current, inet_sk(sk));
74764 +
74765 if (tw) {
74766 inet_twsk_deschedule(tw, death_row);
74767 while (twrefcnt) {
74768 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
74769 index 86f13c67..59a35b5 100644
74770 --- a/net/ipv4/inetpeer.c
74771 +++ b/net/ipv4/inetpeer.c
74772 @@ -436,8 +436,8 @@ relookup:
74773 if (p) {
74774 p->daddr = *daddr;
74775 atomic_set(&p->refcnt, 1);
74776 - atomic_set(&p->rid, 0);
74777 - atomic_set(&p->ip_id_count,
74778 + atomic_set_unchecked(&p->rid, 0);
74779 + atomic_set_unchecked(&p->ip_id_count,
74780 (daddr->family == AF_INET) ?
74781 secure_ip_id(daddr->addr.a4) :
74782 secure_ipv6_id(daddr->addr.a6));
74783 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
74784 index fdaabf2..0ec3205 100644
74785 --- a/net/ipv4/ip_fragment.c
74786 +++ b/net/ipv4/ip_fragment.c
74787 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
74788 return 0;
74789
74790 start = qp->rid;
74791 - end = atomic_inc_return(&peer->rid);
74792 + end = atomic_inc_return_unchecked(&peer->rid);
74793 qp->rid = end;
74794
74795 rc = qp->q.fragments && (end - start) > max;
74796 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
74797 index 09ff51b..d3968eb 100644
74798 --- a/net/ipv4/ip_sockglue.c
74799 +++ b/net/ipv4/ip_sockglue.c
74800 @@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74801 len = min_t(unsigned int, len, opt->optlen);
74802 if (put_user(len, optlen))
74803 return -EFAULT;
74804 - if (copy_to_user(optval, opt->__data, len))
74805 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
74806 + copy_to_user(optval, opt->__data, len))
74807 return -EFAULT;
74808 return 0;
74809 }
74810 @@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
74811 if (sk->sk_type != SOCK_STREAM)
74812 return -ENOPROTOOPT;
74813
74814 - msg.msg_control = optval;
74815 + msg.msg_control = (void __force_kernel *)optval;
74816 msg.msg_controllen = len;
74817 msg.msg_flags = flags;
74818
74819 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
74820 index 99ec116..c5628fe 100644
74821 --- a/net/ipv4/ipconfig.c
74822 +++ b/net/ipv4/ipconfig.c
74823 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
74824
74825 mm_segment_t oldfs = get_fs();
74826 set_fs(get_ds());
74827 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74828 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74829 set_fs(oldfs);
74830 return res;
74831 }
74832 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
74833
74834 mm_segment_t oldfs = get_fs();
74835 set_fs(get_ds());
74836 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
74837 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
74838 set_fs(oldfs);
74839 return res;
74840 }
74841 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
74842
74843 mm_segment_t oldfs = get_fs();
74844 set_fs(get_ds());
74845 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
74846 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
74847 set_fs(oldfs);
74848 return res;
74849 }
74850 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74851 index 2133c30..5c4b40b 100644
74852 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
74853 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
74854 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
74855
74856 *len = 0;
74857
74858 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
74859 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
74860 if (*octets == NULL)
74861 return 0;
74862
74863 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
74864 index 43d4c3b..1914409 100644
74865 --- a/net/ipv4/ping.c
74866 +++ b/net/ipv4/ping.c
74867 @@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
74868 sk_rmem_alloc_get(sp),
74869 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74870 atomic_read(&sp->sk_refcnt), sp,
74871 - atomic_read(&sp->sk_drops), len);
74872 + atomic_read_unchecked(&sp->sk_drops), len);
74873 }
74874
74875 static int ping_seq_show(struct seq_file *seq, void *v)
74876 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
74877 index 007e2eb..85a18a0 100644
74878 --- a/net/ipv4/raw.c
74879 +++ b/net/ipv4/raw.c
74880 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
74881 int raw_rcv(struct sock *sk, struct sk_buff *skb)
74882 {
74883 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
74884 - atomic_inc(&sk->sk_drops);
74885 + atomic_inc_unchecked(&sk->sk_drops);
74886 kfree_skb(skb);
74887 return NET_RX_DROP;
74888 }
74889 @@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
74890
74891 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
74892 {
74893 + struct icmp_filter filter;
74894 +
74895 if (optlen > sizeof(struct icmp_filter))
74896 optlen = sizeof(struct icmp_filter);
74897 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
74898 + if (copy_from_user(&filter, optval, optlen))
74899 return -EFAULT;
74900 + raw_sk(sk)->filter = filter;
74901 return 0;
74902 }
74903
74904 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
74905 {
74906 int len, ret = -EFAULT;
74907 + struct icmp_filter filter;
74908
74909 if (get_user(len, optlen))
74910 goto out;
74911 @@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
74912 if (len > sizeof(struct icmp_filter))
74913 len = sizeof(struct icmp_filter);
74914 ret = -EFAULT;
74915 - if (put_user(len, optlen) ||
74916 - copy_to_user(optval, &raw_sk(sk)->filter, len))
74917 + filter = raw_sk(sk)->filter;
74918 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
74919 goto out;
74920 ret = 0;
74921 out: return ret;
74922 @@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
74923 sk_wmem_alloc_get(sp),
74924 sk_rmem_alloc_get(sp),
74925 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
74926 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
74927 + atomic_read(&sp->sk_refcnt),
74928 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74929 + NULL,
74930 +#else
74931 + sp,
74932 +#endif
74933 + atomic_read_unchecked(&sp->sk_drops));
74934 }
74935
74936 static int raw_seq_show(struct seq_file *seq, void *v)
74937 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
74938 index 94cdbc5..0cb0063 100644
74939 --- a/net/ipv4/route.c
74940 +++ b/net/ipv4/route.c
74941 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
74942
74943 static inline int rt_genid(struct net *net)
74944 {
74945 - return atomic_read(&net->ipv4.rt_genid);
74946 + return atomic_read_unchecked(&net->ipv4.rt_genid);
74947 }
74948
74949 #ifdef CONFIG_PROC_FS
74950 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
74951 unsigned char shuffle;
74952
74953 get_random_bytes(&shuffle, sizeof(shuffle));
74954 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
74955 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
74956 redirect_genid++;
74957 }
74958
74959 @@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
74960 error = rt->dst.error;
74961 if (peer) {
74962 inet_peer_refcheck(rt->peer);
74963 - id = atomic_read(&peer->ip_id_count) & 0xffff;
74964 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
74965 if (peer->tcp_ts_stamp) {
74966 ts = peer->tcp_ts;
74967 tsage = get_seconds() - peer->tcp_ts_stamp;
74968 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
74969 index eb90aa8..22bf114 100644
74970 --- a/net/ipv4/tcp_ipv4.c
74971 +++ b/net/ipv4/tcp_ipv4.c
74972 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
74973 int sysctl_tcp_low_latency __read_mostly;
74974 EXPORT_SYMBOL(sysctl_tcp_low_latency);
74975
74976 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74977 +extern int grsec_enable_blackhole;
74978 +#endif
74979
74980 #ifdef CONFIG_TCP_MD5SIG
74981 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
74982 @@ -1632,6 +1635,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
74983 return 0;
74984
74985 reset:
74986 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74987 + if (!grsec_enable_blackhole)
74988 +#endif
74989 tcp_v4_send_reset(rsk, skb);
74990 discard:
74991 kfree_skb(skb);
74992 @@ -1694,12 +1700,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
74993 TCP_SKB_CB(skb)->sacked = 0;
74994
74995 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
74996 - if (!sk)
74997 + if (!sk) {
74998 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74999 + ret = 1;
75000 +#endif
75001 goto no_tcp_socket;
75002 -
75003 + }
75004 process:
75005 - if (sk->sk_state == TCP_TIME_WAIT)
75006 + if (sk->sk_state == TCP_TIME_WAIT) {
75007 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75008 + ret = 2;
75009 +#endif
75010 goto do_time_wait;
75011 + }
75012
75013 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75014 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75015 @@ -1749,6 +1762,10 @@ no_tcp_socket:
75016 bad_packet:
75017 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75018 } else {
75019 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75020 + if (!grsec_enable_blackhole || (ret == 1 &&
75021 + (skb->dev->flags & IFF_LOOPBACK)))
75022 +#endif
75023 tcp_v4_send_reset(NULL, skb);
75024 }
75025
75026 @@ -2409,7 +2426,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
75027 0, /* non standard timer */
75028 0, /* open_requests have no inode */
75029 atomic_read(&sk->sk_refcnt),
75030 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75031 + NULL,
75032 +#else
75033 req,
75034 +#endif
75035 len);
75036 }
75037
75038 @@ -2459,7 +2480,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75039 sock_i_uid(sk),
75040 icsk->icsk_probes_out,
75041 sock_i_ino(sk),
75042 - atomic_read(&sk->sk_refcnt), sk,
75043 + atomic_read(&sk->sk_refcnt),
75044 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75045 + NULL,
75046 +#else
75047 + sk,
75048 +#endif
75049 jiffies_to_clock_t(icsk->icsk_rto),
75050 jiffies_to_clock_t(icsk->icsk_ack.ato),
75051 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75052 @@ -2487,7 +2513,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
75053 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75054 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75055 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75056 - atomic_read(&tw->tw_refcnt), tw, len);
75057 + atomic_read(&tw->tw_refcnt),
75058 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75059 + NULL,
75060 +#else
75061 + tw,
75062 +#endif
75063 + len);
75064 }
75065
75066 #define TMPSZ 150
75067 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75068 index 66363b6..b0654a3 100644
75069 --- a/net/ipv4/tcp_minisocks.c
75070 +++ b/net/ipv4/tcp_minisocks.c
75071 @@ -27,6 +27,10 @@
75072 #include <net/inet_common.h>
75073 #include <net/xfrm.h>
75074
75075 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75076 +extern int grsec_enable_blackhole;
75077 +#endif
75078 +
75079 int sysctl_tcp_syncookies __read_mostly = 1;
75080 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75081
75082 @@ -751,6 +755,10 @@ listen_overflow:
75083
75084 embryonic_reset:
75085 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75086 +
75087 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75088 + if (!grsec_enable_blackhole)
75089 +#endif
75090 if (!(flg & TCP_FLAG_RST))
75091 req->rsk_ops->send_reset(sk, skb);
75092
75093 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75094 index 85ee7eb..53277ab 100644
75095 --- a/net/ipv4/tcp_probe.c
75096 +++ b/net/ipv4/tcp_probe.c
75097 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75098 if (cnt + width >= len)
75099 break;
75100
75101 - if (copy_to_user(buf + cnt, tbuf, width))
75102 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75103 return -EFAULT;
75104 cnt += width;
75105 }
75106 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75107 index 2e0f0af..e2948bf 100644
75108 --- a/net/ipv4/tcp_timer.c
75109 +++ b/net/ipv4/tcp_timer.c
75110 @@ -22,6 +22,10 @@
75111 #include <linux/gfp.h>
75112 #include <net/tcp.h>
75113
75114 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75115 +extern int grsec_lastack_retries;
75116 +#endif
75117 +
75118 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75119 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75120 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75121 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
75122 }
75123 }
75124
75125 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75126 + if ((sk->sk_state == TCP_LAST_ACK) &&
75127 + (grsec_lastack_retries > 0) &&
75128 + (grsec_lastack_retries < retry_until))
75129 + retry_until = grsec_lastack_retries;
75130 +#endif
75131 +
75132 if (retransmits_timed_out(sk, retry_until,
75133 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75134 /* Has it gone just too far? */
75135 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75136 index 5a65eea..bd913a1 100644
75137 --- a/net/ipv4/udp.c
75138 +++ b/net/ipv4/udp.c
75139 @@ -86,6 +86,7 @@
75140 #include <linux/types.h>
75141 #include <linux/fcntl.h>
75142 #include <linux/module.h>
75143 +#include <linux/security.h>
75144 #include <linux/socket.h>
75145 #include <linux/sockios.h>
75146 #include <linux/igmp.h>
75147 @@ -108,6 +109,10 @@
75148 #include <trace/events/udp.h>
75149 #include "udp_impl.h"
75150
75151 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75152 +extern int grsec_enable_blackhole;
75153 +#endif
75154 +
75155 struct udp_table udp_table __read_mostly;
75156 EXPORT_SYMBOL(udp_table);
75157
75158 @@ -565,6 +570,9 @@ found:
75159 return s;
75160 }
75161
75162 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75163 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75164 +
75165 /*
75166 * This routine is called by the ICMP module when it gets some
75167 * sort of error condition. If err < 0 then the socket should
75168 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75169 dport = usin->sin_port;
75170 if (dport == 0)
75171 return -EINVAL;
75172 +
75173 + err = gr_search_udp_sendmsg(sk, usin);
75174 + if (err)
75175 + return err;
75176 } else {
75177 if (sk->sk_state != TCP_ESTABLISHED)
75178 return -EDESTADDRREQ;
75179 +
75180 + err = gr_search_udp_sendmsg(sk, NULL);
75181 + if (err)
75182 + return err;
75183 +
75184 daddr = inet->inet_daddr;
75185 dport = inet->inet_dport;
75186 /* Open fast path for connected socket.
75187 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
75188 udp_lib_checksum_complete(skb)) {
75189 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75190 IS_UDPLITE(sk));
75191 - atomic_inc(&sk->sk_drops);
75192 + atomic_inc_unchecked(&sk->sk_drops);
75193 __skb_unlink(skb, rcvq);
75194 __skb_queue_tail(&list_kill, skb);
75195 }
75196 @@ -1185,6 +1202,10 @@ try_again:
75197 if (!skb)
75198 goto out;
75199
75200 + err = gr_search_udp_recvmsg(sk, skb);
75201 + if (err)
75202 + goto out_free;
75203 +
75204 ulen = skb->len - sizeof(struct udphdr);
75205 copied = len;
75206 if (copied > ulen)
75207 @@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75208
75209 drop:
75210 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75211 - atomic_inc(&sk->sk_drops);
75212 + atomic_inc_unchecked(&sk->sk_drops);
75213 kfree_skb(skb);
75214 return -1;
75215 }
75216 @@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75217 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75218
75219 if (!skb1) {
75220 - atomic_inc(&sk->sk_drops);
75221 + atomic_inc_unchecked(&sk->sk_drops);
75222 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75223 IS_UDPLITE(sk));
75224 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75225 @@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75226 goto csum_error;
75227
75228 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75229 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75230 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75231 +#endif
75232 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75233
75234 /*
75235 @@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75236 sk_wmem_alloc_get(sp),
75237 sk_rmem_alloc_get(sp),
75238 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75239 - atomic_read(&sp->sk_refcnt), sp,
75240 - atomic_read(&sp->sk_drops), len);
75241 + atomic_read(&sp->sk_refcnt),
75242 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75243 + NULL,
75244 +#else
75245 + sp,
75246 +#endif
75247 + atomic_read_unchecked(&sp->sk_drops), len);
75248 }
75249
75250 int udp4_seq_show(struct seq_file *seq, void *v)
75251 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75252 index 836c4ea..cbb74dc 100644
75253 --- a/net/ipv6/addrconf.c
75254 +++ b/net/ipv6/addrconf.c
75255 @@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75256 p.iph.ihl = 5;
75257 p.iph.protocol = IPPROTO_IPV6;
75258 p.iph.ttl = 64;
75259 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75260 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75261
75262 if (ops->ndo_do_ioctl) {
75263 mm_segment_t oldfs = get_fs();
75264 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75265 index 1567fb1..29af910 100644
75266 --- a/net/ipv6/inet6_connection_sock.c
75267 +++ b/net/ipv6/inet6_connection_sock.c
75268 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75269 #ifdef CONFIG_XFRM
75270 {
75271 struct rt6_info *rt = (struct rt6_info *)dst;
75272 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75273 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75274 }
75275 #endif
75276 }
75277 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75278 #ifdef CONFIG_XFRM
75279 if (dst) {
75280 struct rt6_info *rt = (struct rt6_info *)dst;
75281 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75282 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75283 __sk_dst_reset(sk);
75284 dst = NULL;
75285 }
75286 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75287 index 26cb08c..8af9877 100644
75288 --- a/net/ipv6/ipv6_sockglue.c
75289 +++ b/net/ipv6/ipv6_sockglue.c
75290 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75291 if (sk->sk_type != SOCK_STREAM)
75292 return -ENOPROTOOPT;
75293
75294 - msg.msg_control = optval;
75295 + msg.msg_control = (void __force_kernel *)optval;
75296 msg.msg_controllen = len;
75297 msg.msg_flags = flags;
75298
75299 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75300 index 361ebf3..d5628fb 100644
75301 --- a/net/ipv6/raw.c
75302 +++ b/net/ipv6/raw.c
75303 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75304 {
75305 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75306 skb_checksum_complete(skb)) {
75307 - atomic_inc(&sk->sk_drops);
75308 + atomic_inc_unchecked(&sk->sk_drops);
75309 kfree_skb(skb);
75310 return NET_RX_DROP;
75311 }
75312 @@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75313 struct raw6_sock *rp = raw6_sk(sk);
75314
75315 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75316 - atomic_inc(&sk->sk_drops);
75317 + atomic_inc_unchecked(&sk->sk_drops);
75318 kfree_skb(skb);
75319 return NET_RX_DROP;
75320 }
75321 @@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75322
75323 if (inet->hdrincl) {
75324 if (skb_checksum_complete(skb)) {
75325 - atomic_inc(&sk->sk_drops);
75326 + atomic_inc_unchecked(&sk->sk_drops);
75327 kfree_skb(skb);
75328 return NET_RX_DROP;
75329 }
75330 @@ -601,7 +601,7 @@ out:
75331 return err;
75332 }
75333
75334 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75335 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75336 struct flowi6 *fl6, struct dst_entry **dstp,
75337 unsigned int flags)
75338 {
75339 @@ -909,12 +909,15 @@ do_confirm:
75340 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75341 char __user *optval, int optlen)
75342 {
75343 + struct icmp6_filter filter;
75344 +
75345 switch (optname) {
75346 case ICMPV6_FILTER:
75347 if (optlen > sizeof(struct icmp6_filter))
75348 optlen = sizeof(struct icmp6_filter);
75349 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75350 + if (copy_from_user(&filter, optval, optlen))
75351 return -EFAULT;
75352 + raw6_sk(sk)->filter = filter;
75353 return 0;
75354 default:
75355 return -ENOPROTOOPT;
75356 @@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75357 char __user *optval, int __user *optlen)
75358 {
75359 int len;
75360 + struct icmp6_filter filter;
75361
75362 switch (optname) {
75363 case ICMPV6_FILTER:
75364 @@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75365 len = sizeof(struct icmp6_filter);
75366 if (put_user(len, optlen))
75367 return -EFAULT;
75368 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75369 + filter = raw6_sk(sk)->filter;
75370 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75371 return -EFAULT;
75372 return 0;
75373 default:
75374 @@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75375 0, 0L, 0,
75376 sock_i_uid(sp), 0,
75377 sock_i_ino(sp),
75378 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75379 + atomic_read(&sp->sk_refcnt),
75380 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75381 + NULL,
75382 +#else
75383 + sp,
75384 +#endif
75385 + atomic_read_unchecked(&sp->sk_drops));
75386 }
75387
75388 static int raw6_seq_show(struct seq_file *seq, void *v)
75389 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75390 index b859e4a..f9d1589 100644
75391 --- a/net/ipv6/tcp_ipv6.c
75392 +++ b/net/ipv6/tcp_ipv6.c
75393 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75394 }
75395 #endif
75396
75397 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75398 +extern int grsec_enable_blackhole;
75399 +#endif
75400 +
75401 static void tcp_v6_hash(struct sock *sk)
75402 {
75403 if (sk->sk_state != TCP_CLOSE) {
75404 @@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75405 return 0;
75406
75407 reset:
75408 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75409 + if (!grsec_enable_blackhole)
75410 +#endif
75411 tcp_v6_send_reset(sk, skb);
75412 discard:
75413 if (opt_skb)
75414 @@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75415 TCP_SKB_CB(skb)->sacked = 0;
75416
75417 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75418 - if (!sk)
75419 + if (!sk) {
75420 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75421 + ret = 1;
75422 +#endif
75423 goto no_tcp_socket;
75424 + }
75425
75426 process:
75427 - if (sk->sk_state == TCP_TIME_WAIT)
75428 + if (sk->sk_state == TCP_TIME_WAIT) {
75429 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75430 + ret = 2;
75431 +#endif
75432 goto do_time_wait;
75433 + }
75434
75435 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75436 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75437 @@ -1783,6 +1798,10 @@ no_tcp_socket:
75438 bad_packet:
75439 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75440 } else {
75441 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75442 + if (!grsec_enable_blackhole || (ret == 1 &&
75443 + (skb->dev->flags & IFF_LOOPBACK)))
75444 +#endif
75445 tcp_v6_send_reset(NULL, skb);
75446 }
75447
75448 @@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
75449 uid,
75450 0, /* non standard timer */
75451 0, /* open_requests have no inode */
75452 - 0, req);
75453 + 0,
75454 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75455 + NULL
75456 +#else
75457 + req
75458 +#endif
75459 + );
75460 }
75461
75462 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75463 @@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75464 sock_i_uid(sp),
75465 icsk->icsk_probes_out,
75466 sock_i_ino(sp),
75467 - atomic_read(&sp->sk_refcnt), sp,
75468 + atomic_read(&sp->sk_refcnt),
75469 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75470 + NULL,
75471 +#else
75472 + sp,
75473 +#endif
75474 jiffies_to_clock_t(icsk->icsk_rto),
75475 jiffies_to_clock_t(icsk->icsk_ack.ato),
75476 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75477 @@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75478 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75479 tw->tw_substate, 0, 0,
75480 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75481 - atomic_read(&tw->tw_refcnt), tw);
75482 + atomic_read(&tw->tw_refcnt),
75483 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75484 + NULL
75485 +#else
75486 + tw
75487 +#endif
75488 + );
75489 }
75490
75491 static int tcp6_seq_show(struct seq_file *seq, void *v)
75492 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75493 index 8c25419..47a51ae 100644
75494 --- a/net/ipv6/udp.c
75495 +++ b/net/ipv6/udp.c
75496 @@ -50,6 +50,10 @@
75497 #include <linux/seq_file.h>
75498 #include "udp_impl.h"
75499
75500 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75501 +extern int grsec_enable_blackhole;
75502 +#endif
75503 +
75504 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75505 {
75506 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75507 @@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75508
75509 return 0;
75510 drop:
75511 - atomic_inc(&sk->sk_drops);
75512 + atomic_inc_unchecked(&sk->sk_drops);
75513 drop_no_sk_drops_inc:
75514 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75515 kfree_skb(skb);
75516 @@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75517 continue;
75518 }
75519 drop:
75520 - atomic_inc(&sk->sk_drops);
75521 + atomic_inc_unchecked(&sk->sk_drops);
75522 UDP6_INC_STATS_BH(sock_net(sk),
75523 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75524 UDP6_INC_STATS_BH(sock_net(sk),
75525 @@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75526 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75527 proto == IPPROTO_UDPLITE);
75528
75529 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75530 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75531 +#endif
75532 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75533
75534 kfree_skb(skb);
75535 @@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75536 if (!sock_owned_by_user(sk))
75537 udpv6_queue_rcv_skb(sk, skb);
75538 else if (sk_add_backlog(sk, skb)) {
75539 - atomic_inc(&sk->sk_drops);
75540 + atomic_inc_unchecked(&sk->sk_drops);
75541 bh_unlock_sock(sk);
75542 sock_put(sk);
75543 goto discard;
75544 @@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75545 0, 0L, 0,
75546 sock_i_uid(sp), 0,
75547 sock_i_ino(sp),
75548 - atomic_read(&sp->sk_refcnt), sp,
75549 - atomic_read(&sp->sk_drops));
75550 + atomic_read(&sp->sk_refcnt),
75551 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75552 + NULL,
75553 +#else
75554 + sp,
75555 +#endif
75556 + atomic_read_unchecked(&sp->sk_drops));
75557 }
75558
75559 int udp6_seq_show(struct seq_file *seq, void *v)
75560 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75561 index 253695d..9481ce8 100644
75562 --- a/net/irda/ircomm/ircomm_tty.c
75563 +++ b/net/irda/ircomm/ircomm_tty.c
75564 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75565 add_wait_queue(&self->open_wait, &wait);
75566
75567 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75568 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75569 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75570
75571 /* As far as I can see, we protect open_count - Jean II */
75572 spin_lock_irqsave(&self->spinlock, flags);
75573 if (!tty_hung_up_p(filp)) {
75574 extra_count = 1;
75575 - self->open_count--;
75576 + local_dec(&self->open_count);
75577 }
75578 spin_unlock_irqrestore(&self->spinlock, flags);
75579 - self->blocked_open++;
75580 + local_inc(&self->blocked_open);
75581
75582 while (1) {
75583 if (tty->termios->c_cflag & CBAUD) {
75584 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75585 }
75586
75587 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75588 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75589 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75590
75591 schedule();
75592 }
75593 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75594 if (extra_count) {
75595 /* ++ is not atomic, so this should be protected - Jean II */
75596 spin_lock_irqsave(&self->spinlock, flags);
75597 - self->open_count++;
75598 + local_inc(&self->open_count);
75599 spin_unlock_irqrestore(&self->spinlock, flags);
75600 }
75601 - self->blocked_open--;
75602 + local_dec(&self->blocked_open);
75603
75604 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75605 - __FILE__,__LINE__, tty->driver->name, self->open_count);
75606 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75607
75608 if (!retval)
75609 self->flags |= ASYNC_NORMAL_ACTIVE;
75610 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75611 }
75612 /* ++ is not atomic, so this should be protected - Jean II */
75613 spin_lock_irqsave(&self->spinlock, flags);
75614 - self->open_count++;
75615 + local_inc(&self->open_count);
75616
75617 tty->driver_data = self;
75618 self->tty = tty;
75619 spin_unlock_irqrestore(&self->spinlock, flags);
75620
75621 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75622 - self->line, self->open_count);
75623 + self->line, local_read(&self->open_count));
75624
75625 /* Not really used by us, but lets do it anyway */
75626 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75627 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75628 return;
75629 }
75630
75631 - if ((tty->count == 1) && (self->open_count != 1)) {
75632 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75633 /*
75634 * Uh, oh. tty->count is 1, which means that the tty
75635 * structure will be freed. state->count should always
75636 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75637 */
75638 IRDA_DEBUG(0, "%s(), bad serial port count; "
75639 "tty->count is 1, state->count is %d\n", __func__ ,
75640 - self->open_count);
75641 - self->open_count = 1;
75642 + local_read(&self->open_count));
75643 + local_set(&self->open_count, 1);
75644 }
75645
75646 - if (--self->open_count < 0) {
75647 + if (local_dec_return(&self->open_count) < 0) {
75648 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
75649 - __func__, self->line, self->open_count);
75650 - self->open_count = 0;
75651 + __func__, self->line, local_read(&self->open_count));
75652 + local_set(&self->open_count, 0);
75653 }
75654 - if (self->open_count) {
75655 + if (local_read(&self->open_count)) {
75656 spin_unlock_irqrestore(&self->spinlock, flags);
75657
75658 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
75659 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75660 tty->closing = 0;
75661 self->tty = NULL;
75662
75663 - if (self->blocked_open) {
75664 + if (local_read(&self->blocked_open)) {
75665 if (self->close_delay)
75666 schedule_timeout_interruptible(self->close_delay);
75667 wake_up_interruptible(&self->open_wait);
75668 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
75669 spin_lock_irqsave(&self->spinlock, flags);
75670 self->flags &= ~ASYNC_NORMAL_ACTIVE;
75671 self->tty = NULL;
75672 - self->open_count = 0;
75673 + local_set(&self->open_count, 0);
75674 spin_unlock_irqrestore(&self->spinlock, flags);
75675
75676 wake_up_interruptible(&self->open_wait);
75677 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
75678 seq_putc(m, '\n');
75679
75680 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
75681 - seq_printf(m, "Open count: %d\n", self->open_count);
75682 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
75683 seq_printf(m, "Max data size: %d\n", self->max_data_size);
75684 seq_printf(m, "Max header size: %d\n", self->max_header_size);
75685
75686 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
75687 index 274d150..656a144 100644
75688 --- a/net/iucv/af_iucv.c
75689 +++ b/net/iucv/af_iucv.c
75690 @@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
75691
75692 write_lock_bh(&iucv_sk_list.lock);
75693
75694 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
75695 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75696 while (__iucv_get_sock_by_name(name)) {
75697 sprintf(name, "%08x",
75698 - atomic_inc_return(&iucv_sk_list.autobind_name));
75699 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
75700 }
75701
75702 write_unlock_bh(&iucv_sk_list.lock);
75703 diff --git a/net/key/af_key.c b/net/key/af_key.c
75704 index 1e733e9..3d73c9f 100644
75705 --- a/net/key/af_key.c
75706 +++ b/net/key/af_key.c
75707 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
75708 static u32 get_acqseq(void)
75709 {
75710 u32 res;
75711 - static atomic_t acqseq;
75712 + static atomic_unchecked_t acqseq;
75713
75714 do {
75715 - res = atomic_inc_return(&acqseq);
75716 + res = atomic_inc_return_unchecked(&acqseq);
75717 } while (!res);
75718 return res;
75719 }
75720 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
75721 index 73495f1..ad51356 100644
75722 --- a/net/mac80211/ieee80211_i.h
75723 +++ b/net/mac80211/ieee80211_i.h
75724 @@ -27,6 +27,7 @@
75725 #include <net/ieee80211_radiotap.h>
75726 #include <net/cfg80211.h>
75727 #include <net/mac80211.h>
75728 +#include <asm/local.h>
75729 #include "key.h"
75730 #include "sta_info.h"
75731
75732 @@ -764,7 +765,7 @@ struct ieee80211_local {
75733 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
75734 spinlock_t queue_stop_reason_lock;
75735
75736 - int open_count;
75737 + local_t open_count;
75738 int monitors, cooked_mntrs;
75739 /* number of interfaces with corresponding FIF_ flags */
75740 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
75741 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
75742 index 30d7355..e260095 100644
75743 --- a/net/mac80211/iface.c
75744 +++ b/net/mac80211/iface.c
75745 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75746 break;
75747 }
75748
75749 - if (local->open_count == 0) {
75750 + if (local_read(&local->open_count) == 0) {
75751 res = drv_start(local);
75752 if (res)
75753 goto err_del_bss;
75754 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75755 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
75756
75757 if (!is_valid_ether_addr(dev->dev_addr)) {
75758 - if (!local->open_count)
75759 + if (!local_read(&local->open_count))
75760 drv_stop(local);
75761 return -EADDRNOTAVAIL;
75762 }
75763 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75764 mutex_unlock(&local->mtx);
75765
75766 if (coming_up)
75767 - local->open_count++;
75768 + local_inc(&local->open_count);
75769
75770 if (hw_reconf_flags) {
75771 ieee80211_hw_config(local, hw_reconf_flags);
75772 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
75773 err_del_interface:
75774 drv_remove_interface(local, &sdata->vif);
75775 err_stop:
75776 - if (!local->open_count)
75777 + if (!local_read(&local->open_count))
75778 drv_stop(local);
75779 err_del_bss:
75780 sdata->bss = NULL;
75781 @@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75782 }
75783
75784 if (going_down)
75785 - local->open_count--;
75786 + local_dec(&local->open_count);
75787
75788 switch (sdata->vif.type) {
75789 case NL80211_IFTYPE_AP_VLAN:
75790 @@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
75791
75792 ieee80211_recalc_ps(local, -1);
75793
75794 - if (local->open_count == 0) {
75795 + if (local_read(&local->open_count) == 0) {
75796 if (local->ops->napi_poll)
75797 napi_disable(&local->napi);
75798 ieee80211_clear_tx_pending(local);
75799 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
75800 index 7d9b21d..0687004 100644
75801 --- a/net/mac80211/main.c
75802 +++ b/net/mac80211/main.c
75803 @@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
75804 local->hw.conf.power_level = power;
75805 }
75806
75807 - if (changed && local->open_count) {
75808 + if (changed && local_read(&local->open_count)) {
75809 ret = drv_config(local, changed);
75810 /*
75811 * Goal:
75812 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
75813 index 9ee7164..56c5061 100644
75814 --- a/net/mac80211/pm.c
75815 +++ b/net/mac80211/pm.c
75816 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75817 struct ieee80211_sub_if_data *sdata;
75818 struct sta_info *sta;
75819
75820 - if (!local->open_count)
75821 + if (!local_read(&local->open_count))
75822 goto suspend;
75823
75824 ieee80211_scan_cancel(local);
75825 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75826 cancel_work_sync(&local->dynamic_ps_enable_work);
75827 del_timer_sync(&local->dynamic_ps_timer);
75828
75829 - local->wowlan = wowlan && local->open_count;
75830 + local->wowlan = wowlan && local_read(&local->open_count);
75831 if (local->wowlan) {
75832 int err = drv_suspend(local, wowlan);
75833 if (err < 0) {
75834 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
75835 }
75836
75837 /* stop hardware - this must stop RX */
75838 - if (local->open_count)
75839 + if (local_read(&local->open_count))
75840 ieee80211_stop_device(local);
75841
75842 suspend:
75843 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
75844 index 7d84b87..6a69cd9 100644
75845 --- a/net/mac80211/rate.c
75846 +++ b/net/mac80211/rate.c
75847 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
75848
75849 ASSERT_RTNL();
75850
75851 - if (local->open_count)
75852 + if (local_read(&local->open_count))
75853 return -EBUSY;
75854
75855 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
75856 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
75857 index c97a065..ff61928 100644
75858 --- a/net/mac80211/rc80211_pid_debugfs.c
75859 +++ b/net/mac80211/rc80211_pid_debugfs.c
75860 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
75861
75862 spin_unlock_irqrestore(&events->lock, status);
75863
75864 - if (copy_to_user(buf, pb, p))
75865 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
75866 return -EFAULT;
75867
75868 return p;
75869 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
75870 index d5230ec..c604b21 100644
75871 --- a/net/mac80211/util.c
75872 +++ b/net/mac80211/util.c
75873 @@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
75874 drv_set_coverage_class(local, hw->wiphy->coverage_class);
75875
75876 /* everything else happens only if HW was up & running */
75877 - if (!local->open_count)
75878 + if (!local_read(&local->open_count))
75879 goto wake_up;
75880
75881 /*
75882 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
75883 index d5597b7..ab6d39c 100644
75884 --- a/net/netfilter/Kconfig
75885 +++ b/net/netfilter/Kconfig
75886 @@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
75887
75888 To compile it as a module, choose M here. If unsure, say N.
75889
75890 +config NETFILTER_XT_MATCH_GRADM
75891 + tristate '"gradm" match support'
75892 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
75893 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
75894 + ---help---
75895 + The gradm match allows to match on grsecurity RBAC being enabled.
75896 + It is useful when iptables rules are applied early on bootup to
75897 + prevent connections to the machine (except from a trusted host)
75898 + while the RBAC system is disabled.
75899 +
75900 config NETFILTER_XT_MATCH_HASHLIMIT
75901 tristate '"hashlimit" match support'
75902 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
75903 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
75904 index 1a02853..5d8c22e 100644
75905 --- a/net/netfilter/Makefile
75906 +++ b/net/netfilter/Makefile
75907 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
75908 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
75909 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
75910 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
75911 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
75912 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75913 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
75914 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
75915 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
75916 index 29fa5ba..8debc79 100644
75917 --- a/net/netfilter/ipvs/ip_vs_conn.c
75918 +++ b/net/netfilter/ipvs/ip_vs_conn.c
75919 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
75920 /* Increase the refcnt counter of the dest */
75921 atomic_inc(&dest->refcnt);
75922
75923 - conn_flags = atomic_read(&dest->conn_flags);
75924 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
75925 if (cp->protocol != IPPROTO_UDP)
75926 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
75927 /* Bind with the destination and its corresponding transmitter */
75928 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
75929 atomic_set(&cp->refcnt, 1);
75930
75931 atomic_set(&cp->n_control, 0);
75932 - atomic_set(&cp->in_pkts, 0);
75933 + atomic_set_unchecked(&cp->in_pkts, 0);
75934
75935 atomic_inc(&ipvs->conn_count);
75936 if (flags & IP_VS_CONN_F_NO_CPORT)
75937 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
75938
75939 /* Don't drop the entry if its number of incoming packets is not
75940 located in [0, 8] */
75941 - i = atomic_read(&cp->in_pkts);
75942 + i = atomic_read_unchecked(&cp->in_pkts);
75943 if (i > 8 || i < 0) return 0;
75944
75945 if (!todrop_rate[i]) return 0;
75946 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
75947 index 6dc7d7d..e45913a 100644
75948 --- a/net/netfilter/ipvs/ip_vs_core.c
75949 +++ b/net/netfilter/ipvs/ip_vs_core.c
75950 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
75951 ret = cp->packet_xmit(skb, cp, pd->pp);
75952 /* do not touch skb anymore */
75953
75954 - atomic_inc(&cp->in_pkts);
75955 + atomic_inc_unchecked(&cp->in_pkts);
75956 ip_vs_conn_put(cp);
75957 return ret;
75958 }
75959 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
75960 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
75961 pkts = sysctl_sync_threshold(ipvs);
75962 else
75963 - pkts = atomic_add_return(1, &cp->in_pkts);
75964 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
75965
75966 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
75967 cp->protocol == IPPROTO_SCTP) {
75968 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
75969 index e1a66cf..0910076 100644
75970 --- a/net/netfilter/ipvs/ip_vs_ctl.c
75971 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
75972 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
75973 ip_vs_rs_hash(ipvs, dest);
75974 write_unlock_bh(&ipvs->rs_lock);
75975 }
75976 - atomic_set(&dest->conn_flags, conn_flags);
75977 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
75978
75979 /* bind the service */
75980 if (!dest->svc) {
75981 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75982 " %-7s %-6d %-10d %-10d\n",
75983 &dest->addr.in6,
75984 ntohs(dest->port),
75985 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75986 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75987 atomic_read(&dest->weight),
75988 atomic_read(&dest->activeconns),
75989 atomic_read(&dest->inactconns));
75990 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
75991 "%-7s %-6d %-10d %-10d\n",
75992 ntohl(dest->addr.ip),
75993 ntohs(dest->port),
75994 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
75995 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
75996 atomic_read(&dest->weight),
75997 atomic_read(&dest->activeconns),
75998 atomic_read(&dest->inactconns));
75999 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76000
76001 entry.addr = dest->addr.ip;
76002 entry.port = dest->port;
76003 - entry.conn_flags = atomic_read(&dest->conn_flags);
76004 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76005 entry.weight = atomic_read(&dest->weight);
76006 entry.u_threshold = dest->u_threshold;
76007 entry.l_threshold = dest->l_threshold;
76008 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76009 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76010
76011 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76012 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76013 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76014 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76015 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76016 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76017 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76018 index 2b6678c0..aaa41fc 100644
76019 --- a/net/netfilter/ipvs/ip_vs_sync.c
76020 +++ b/net/netfilter/ipvs/ip_vs_sync.c
76021 @@ -649,7 +649,7 @@ control:
76022 * i.e only increment in_pkts for Templates.
76023 */
76024 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76025 - int pkts = atomic_add_return(1, &cp->in_pkts);
76026 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76027
76028 if (pkts % sysctl_sync_period(ipvs) != 1)
76029 return;
76030 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76031
76032 if (opt)
76033 memcpy(&cp->in_seq, opt, sizeof(*opt));
76034 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76035 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76036 cp->state = state;
76037 cp->old_state = cp->state;
76038 /*
76039 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76040 index aa2d720..d8aa111 100644
76041 --- a/net/netfilter/ipvs/ip_vs_xmit.c
76042 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
76043 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76044 else
76045 rc = NF_ACCEPT;
76046 /* do not touch skb anymore */
76047 - atomic_inc(&cp->in_pkts);
76048 + atomic_inc_unchecked(&cp->in_pkts);
76049 goto out;
76050 }
76051
76052 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76053 else
76054 rc = NF_ACCEPT;
76055 /* do not touch skb anymore */
76056 - atomic_inc(&cp->in_pkts);
76057 + atomic_inc_unchecked(&cp->in_pkts);
76058 goto out;
76059 }
76060
76061 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76062 index 66b2c54..c7884e3 100644
76063 --- a/net/netfilter/nfnetlink_log.c
76064 +++ b/net/netfilter/nfnetlink_log.c
76065 @@ -70,7 +70,7 @@ struct nfulnl_instance {
76066 };
76067
76068 static DEFINE_SPINLOCK(instances_lock);
76069 -static atomic_t global_seq;
76070 +static atomic_unchecked_t global_seq;
76071
76072 #define INSTANCE_BUCKETS 16
76073 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76074 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76075 /* global sequence number */
76076 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76077 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76078 - htonl(atomic_inc_return(&global_seq)));
76079 + htonl(atomic_inc_return_unchecked(&global_seq)));
76080
76081 if (data_len) {
76082 struct nlattr *nla;
76083 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76084 new file mode 100644
76085 index 0000000..6905327
76086 --- /dev/null
76087 +++ b/net/netfilter/xt_gradm.c
76088 @@ -0,0 +1,51 @@
76089 +/*
76090 + * gradm match for netfilter
76091 + * Copyright © Zbigniew Krzystolik, 2010
76092 + *
76093 + * This program is free software; you can redistribute it and/or modify
76094 + * it under the terms of the GNU General Public License; either version
76095 + * 2 or 3 as published by the Free Software Foundation.
76096 + */
76097 +#include <linux/module.h>
76098 +#include <linux/moduleparam.h>
76099 +#include <linux/skbuff.h>
76100 +#include <linux/netfilter/x_tables.h>
76101 +#include <linux/grsecurity.h>
76102 +#include <linux/netfilter/xt_gradm.h>
76103 +
76104 +static bool
76105 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
76106 +{
76107 + const struct xt_gradm_mtinfo *info = par->matchinfo;
76108 + bool retval = false;
76109 + if (gr_acl_is_enabled())
76110 + retval = true;
76111 + return retval ^ info->invflags;
76112 +}
76113 +
76114 +static struct xt_match gradm_mt_reg __read_mostly = {
76115 + .name = "gradm",
76116 + .revision = 0,
76117 + .family = NFPROTO_UNSPEC,
76118 + .match = gradm_mt,
76119 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
76120 + .me = THIS_MODULE,
76121 +};
76122 +
76123 +static int __init gradm_mt_init(void)
76124 +{
76125 + return xt_register_match(&gradm_mt_reg);
76126 +}
76127 +
76128 +static void __exit gradm_mt_exit(void)
76129 +{
76130 + xt_unregister_match(&gradm_mt_reg);
76131 +}
76132 +
76133 +module_init(gradm_mt_init);
76134 +module_exit(gradm_mt_exit);
76135 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
76136 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
76137 +MODULE_LICENSE("GPL");
76138 +MODULE_ALIAS("ipt_gradm");
76139 +MODULE_ALIAS("ip6t_gradm");
76140 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
76141 index 4fe4fb4..87a89e5 100644
76142 --- a/net/netfilter/xt_statistic.c
76143 +++ b/net/netfilter/xt_statistic.c
76144 @@ -19,7 +19,7 @@
76145 #include <linux/module.h>
76146
76147 struct xt_statistic_priv {
76148 - atomic_t count;
76149 + atomic_unchecked_t count;
76150 } ____cacheline_aligned_in_smp;
76151
76152 MODULE_LICENSE("GPL");
76153 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
76154 break;
76155 case XT_STATISTIC_MODE_NTH:
76156 do {
76157 - oval = atomic_read(&info->master->count);
76158 + oval = atomic_read_unchecked(&info->master->count);
76159 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
76160 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
76161 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
76162 if (nval == 0)
76163 ret = !ret;
76164 break;
76165 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
76166 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
76167 if (info->master == NULL)
76168 return -ENOMEM;
76169 - atomic_set(&info->master->count, info->u.nth.count);
76170 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
76171
76172 return 0;
76173 }
76174 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
76175 index 1201b6d..bcff8c6 100644
76176 --- a/net/netlink/af_netlink.c
76177 +++ b/net/netlink/af_netlink.c
76178 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
76179 sk->sk_error_report(sk);
76180 }
76181 }
76182 - atomic_inc(&sk->sk_drops);
76183 + atomic_inc_unchecked(&sk->sk_drops);
76184 }
76185
76186 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
76187 @@ -1999,7 +1999,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
76188 sk_wmem_alloc_get(s),
76189 nlk->cb,
76190 atomic_read(&s->sk_refcnt),
76191 - atomic_read(&s->sk_drops),
76192 + atomic_read_unchecked(&s->sk_drops),
76193 sock_i_ino(s)
76194 );
76195
76196 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
76197 index 732152f..60bb09e 100644
76198 --- a/net/netrom/af_netrom.c
76199 +++ b/net/netrom/af_netrom.c
76200 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76201 struct sock *sk = sock->sk;
76202 struct nr_sock *nr = nr_sk(sk);
76203
76204 + memset(sax, 0, sizeof(*sax));
76205 lock_sock(sk);
76206 if (peer != 0) {
76207 if (sk->sk_state != TCP_ESTABLISHED) {
76208 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76209 *uaddr_len = sizeof(struct full_sockaddr_ax25);
76210 } else {
76211 sax->fsa_ax25.sax25_family = AF_NETROM;
76212 - sax->fsa_ax25.sax25_ndigis = 0;
76213 sax->fsa_ax25.sax25_call = nr->source_addr;
76214 *uaddr_len = sizeof(struct sockaddr_ax25);
76215 }
76216 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
76217 index d9d4970..d5a6a68 100644
76218 --- a/net/packet/af_packet.c
76219 +++ b/net/packet/af_packet.c
76220 @@ -1675,7 +1675,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76221
76222 spin_lock(&sk->sk_receive_queue.lock);
76223 po->stats.tp_packets++;
76224 - skb->dropcount = atomic_read(&sk->sk_drops);
76225 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76226 __skb_queue_tail(&sk->sk_receive_queue, skb);
76227 spin_unlock(&sk->sk_receive_queue.lock);
76228 sk->sk_data_ready(sk, skb->len);
76229 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76230 drop_n_acct:
76231 spin_lock(&sk->sk_receive_queue.lock);
76232 po->stats.tp_drops++;
76233 - atomic_inc(&sk->sk_drops);
76234 + atomic_inc_unchecked(&sk->sk_drops);
76235 spin_unlock(&sk->sk_receive_queue.lock);
76236
76237 drop_n_restore:
76238 @@ -3266,7 +3266,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76239 case PACKET_HDRLEN:
76240 if (len > sizeof(int))
76241 len = sizeof(int);
76242 - if (copy_from_user(&val, optval, len))
76243 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
76244 return -EFAULT;
76245 switch (val) {
76246 case TPACKET_V1:
76247 @@ -3316,7 +3316,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76248
76249 if (put_user(len, optlen))
76250 return -EFAULT;
76251 - if (copy_to_user(optval, data, len))
76252 + if (len > sizeof(st) || copy_to_user(optval, data, len))
76253 return -EFAULT;
76254 return 0;
76255 }
76256 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
76257 index d65f699..05aa6ce 100644
76258 --- a/net/phonet/af_phonet.c
76259 +++ b/net/phonet/af_phonet.c
76260 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
76261 {
76262 struct phonet_protocol *pp;
76263
76264 - if (protocol >= PHONET_NPROTO)
76265 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76266 return NULL;
76267
76268 rcu_read_lock();
76269 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
76270 {
76271 int err = 0;
76272
76273 - if (protocol >= PHONET_NPROTO)
76274 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76275 return -EINVAL;
76276
76277 err = proto_register(pp->prot, 1);
76278 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
76279 index 2ba6e9f..409573f 100644
76280 --- a/net/phonet/pep.c
76281 +++ b/net/phonet/pep.c
76282 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76283
76284 case PNS_PEP_CTRL_REQ:
76285 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
76286 - atomic_inc(&sk->sk_drops);
76287 + atomic_inc_unchecked(&sk->sk_drops);
76288 break;
76289 }
76290 __skb_pull(skb, 4);
76291 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76292 }
76293
76294 if (pn->rx_credits == 0) {
76295 - atomic_inc(&sk->sk_drops);
76296 + atomic_inc_unchecked(&sk->sk_drops);
76297 err = -ENOBUFS;
76298 break;
76299 }
76300 @@ -557,7 +557,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76301 }
76302
76303 if (pn->rx_credits == 0) {
76304 - atomic_inc(&sk->sk_drops);
76305 + atomic_inc_unchecked(&sk->sk_drops);
76306 err = NET_RX_DROP;
76307 break;
76308 }
76309 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76310 index 4c7eff3..59c727f 100644
76311 --- a/net/phonet/socket.c
76312 +++ b/net/phonet/socket.c
76313 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76314 pn->resource, sk->sk_state,
76315 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76316 sock_i_uid(sk), sock_i_ino(sk),
76317 - atomic_read(&sk->sk_refcnt), sk,
76318 - atomic_read(&sk->sk_drops), &len);
76319 + atomic_read(&sk->sk_refcnt),
76320 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76321 + NULL,
76322 +#else
76323 + sk,
76324 +#endif
76325 + atomic_read_unchecked(&sk->sk_drops), &len);
76326 }
76327 seq_printf(seq, "%*s\n", 127 - len, "");
76328 return 0;
76329 diff --git a/net/rds/cong.c b/net/rds/cong.c
76330 index e5b65ac..f3b6fb7 100644
76331 --- a/net/rds/cong.c
76332 +++ b/net/rds/cong.c
76333 @@ -78,7 +78,7 @@
76334 * finds that the saved generation number is smaller than the global generation
76335 * number, it wakes up the process.
76336 */
76337 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76338 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76339
76340 /*
76341 * Congestion monitoring
76342 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76343 rdsdebug("waking map %p for %pI4\n",
76344 map, &map->m_addr);
76345 rds_stats_inc(s_cong_update_received);
76346 - atomic_inc(&rds_cong_generation);
76347 + atomic_inc_unchecked(&rds_cong_generation);
76348 if (waitqueue_active(&map->m_waitq))
76349 wake_up(&map->m_waitq);
76350 if (waitqueue_active(&rds_poll_waitq))
76351 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76352
76353 int rds_cong_updated_since(unsigned long *recent)
76354 {
76355 - unsigned long gen = atomic_read(&rds_cong_generation);
76356 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76357
76358 if (likely(*recent == gen))
76359 return 0;
76360 diff --git a/net/rds/ib.h b/net/rds/ib.h
76361 index edfaaaf..8c89879 100644
76362 --- a/net/rds/ib.h
76363 +++ b/net/rds/ib.h
76364 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76365 /* sending acks */
76366 unsigned long i_ack_flags;
76367 #ifdef KERNEL_HAS_ATOMIC64
76368 - atomic64_t i_ack_next; /* next ACK to send */
76369 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76370 #else
76371 spinlock_t i_ack_lock; /* protect i_ack_next */
76372 u64 i_ack_next; /* next ACK to send */
76373 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
76374 index 51c8689..36c555f 100644
76375 --- a/net/rds/ib_cm.c
76376 +++ b/net/rds/ib_cm.c
76377 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
76378 /* Clear the ACK state */
76379 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76380 #ifdef KERNEL_HAS_ATOMIC64
76381 - atomic64_set(&ic->i_ack_next, 0);
76382 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76383 #else
76384 ic->i_ack_next = 0;
76385 #endif
76386 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
76387 index e29e0ca..fa3a6a3 100644
76388 --- a/net/rds/ib_recv.c
76389 +++ b/net/rds/ib_recv.c
76390 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76391 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
76392 int ack_required)
76393 {
76394 - atomic64_set(&ic->i_ack_next, seq);
76395 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76396 if (ack_required) {
76397 smp_mb__before_clear_bit();
76398 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76399 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76400 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76401 smp_mb__after_clear_bit();
76402
76403 - return atomic64_read(&ic->i_ack_next);
76404 + return atomic64_read_unchecked(&ic->i_ack_next);
76405 }
76406 #endif
76407
76408 diff --git a/net/rds/iw.h b/net/rds/iw.h
76409 index 04ce3b1..48119a6 100644
76410 --- a/net/rds/iw.h
76411 +++ b/net/rds/iw.h
76412 @@ -134,7 +134,7 @@ struct rds_iw_connection {
76413 /* sending acks */
76414 unsigned long i_ack_flags;
76415 #ifdef KERNEL_HAS_ATOMIC64
76416 - atomic64_t i_ack_next; /* next ACK to send */
76417 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76418 #else
76419 spinlock_t i_ack_lock; /* protect i_ack_next */
76420 u64 i_ack_next; /* next ACK to send */
76421 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
76422 index 9556d28..f046d0e 100644
76423 --- a/net/rds/iw_cm.c
76424 +++ b/net/rds/iw_cm.c
76425 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
76426 /* Clear the ACK state */
76427 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76428 #ifdef KERNEL_HAS_ATOMIC64
76429 - atomic64_set(&ic->i_ack_next, 0);
76430 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76431 #else
76432 ic->i_ack_next = 0;
76433 #endif
76434 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
76435 index 5e57347..3916042 100644
76436 --- a/net/rds/iw_recv.c
76437 +++ b/net/rds/iw_recv.c
76438 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76439 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
76440 int ack_required)
76441 {
76442 - atomic64_set(&ic->i_ack_next, seq);
76443 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76444 if (ack_required) {
76445 smp_mb__before_clear_bit();
76446 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76447 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76448 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76449 smp_mb__after_clear_bit();
76450
76451 - return atomic64_read(&ic->i_ack_next);
76452 + return atomic64_read_unchecked(&ic->i_ack_next);
76453 }
76454 #endif
76455
76456 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
76457 index edac9ef..16bcb98 100644
76458 --- a/net/rds/tcp.c
76459 +++ b/net/rds/tcp.c
76460 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
76461 int val = 1;
76462
76463 set_fs(KERNEL_DS);
76464 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
76465 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
76466 sizeof(val));
76467 set_fs(oldfs);
76468 }
76469 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
76470 index 1b4fd68..2234175 100644
76471 --- a/net/rds/tcp_send.c
76472 +++ b/net/rds/tcp_send.c
76473 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
76474
76475 oldfs = get_fs();
76476 set_fs(KERNEL_DS);
76477 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
76478 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
76479 sizeof(val));
76480 set_fs(oldfs);
76481 }
76482 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
76483 index 74c064c..fdec26f 100644
76484 --- a/net/rxrpc/af_rxrpc.c
76485 +++ b/net/rxrpc/af_rxrpc.c
76486 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
76487 __be32 rxrpc_epoch;
76488
76489 /* current debugging ID */
76490 -atomic_t rxrpc_debug_id;
76491 +atomic_unchecked_t rxrpc_debug_id;
76492
76493 /* count of skbs currently in use */
76494 atomic_t rxrpc_n_skbs;
76495 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
76496 index f99cfce..cc529dd 100644
76497 --- a/net/rxrpc/ar-ack.c
76498 +++ b/net/rxrpc/ar-ack.c
76499 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76500
76501 _enter("{%d,%d,%d,%d},",
76502 call->acks_hard, call->acks_unacked,
76503 - atomic_read(&call->sequence),
76504 + atomic_read_unchecked(&call->sequence),
76505 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
76506
76507 stop = 0;
76508 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76509
76510 /* each Tx packet has a new serial number */
76511 sp->hdr.serial =
76512 - htonl(atomic_inc_return(&call->conn->serial));
76513 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
76514
76515 hdr = (struct rxrpc_header *) txb->head;
76516 hdr->serial = sp->hdr.serial;
76517 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
76518 */
76519 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
76520 {
76521 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
76522 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
76523 }
76524
76525 /*
76526 @@ -629,7 +629,7 @@ process_further:
76527
76528 latest = ntohl(sp->hdr.serial);
76529 hard = ntohl(ack.firstPacket);
76530 - tx = atomic_read(&call->sequence);
76531 + tx = atomic_read_unchecked(&call->sequence);
76532
76533 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76534 latest,
76535 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
76536 goto maybe_reschedule;
76537
76538 send_ACK_with_skew:
76539 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
76540 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
76541 ntohl(ack.serial));
76542 send_ACK:
76543 mtu = call->conn->trans->peer->if_mtu;
76544 @@ -1173,7 +1173,7 @@ send_ACK:
76545 ackinfo.rxMTU = htonl(5692);
76546 ackinfo.jumbo_max = htonl(4);
76547
76548 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76549 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76550 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
76551 ntohl(hdr.serial),
76552 ntohs(ack.maxSkew),
76553 @@ -1191,7 +1191,7 @@ send_ACK:
76554 send_message:
76555 _debug("send message");
76556
76557 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
76558 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
76559 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
76560 send_message_2:
76561
76562 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
76563 index bf656c2..48f9d27 100644
76564 --- a/net/rxrpc/ar-call.c
76565 +++ b/net/rxrpc/ar-call.c
76566 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
76567 spin_lock_init(&call->lock);
76568 rwlock_init(&call->state_lock);
76569 atomic_set(&call->usage, 1);
76570 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
76571 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76572 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
76573
76574 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
76575 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
76576 index 4106ca9..a338d7a 100644
76577 --- a/net/rxrpc/ar-connection.c
76578 +++ b/net/rxrpc/ar-connection.c
76579 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
76580 rwlock_init(&conn->lock);
76581 spin_lock_init(&conn->state_lock);
76582 atomic_set(&conn->usage, 1);
76583 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
76584 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76585 conn->avail_calls = RXRPC_MAXCALLS;
76586 conn->size_align = 4;
76587 conn->header_size = sizeof(struct rxrpc_header);
76588 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
76589 index e7ed43a..6afa140 100644
76590 --- a/net/rxrpc/ar-connevent.c
76591 +++ b/net/rxrpc/ar-connevent.c
76592 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
76593
76594 len = iov[0].iov_len + iov[1].iov_len;
76595
76596 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76597 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76598 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
76599
76600 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76601 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
76602 index 1a2b0633..e8d1382 100644
76603 --- a/net/rxrpc/ar-input.c
76604 +++ b/net/rxrpc/ar-input.c
76605 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
76606 /* track the latest serial number on this connection for ACK packet
76607 * information */
76608 serial = ntohl(sp->hdr.serial);
76609 - hi_serial = atomic_read(&call->conn->hi_serial);
76610 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
76611 while (serial > hi_serial)
76612 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
76613 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
76614 serial);
76615
76616 /* request ACK generation for any ACK or DATA packet that requests
76617 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
76618 index 8e22bd3..f66d1c0 100644
76619 --- a/net/rxrpc/ar-internal.h
76620 +++ b/net/rxrpc/ar-internal.h
76621 @@ -272,8 +272,8 @@ struct rxrpc_connection {
76622 int error; /* error code for local abort */
76623 int debug_id; /* debug ID for printks */
76624 unsigned call_counter; /* call ID counter */
76625 - atomic_t serial; /* packet serial number counter */
76626 - atomic_t hi_serial; /* highest serial number received */
76627 + atomic_unchecked_t serial; /* packet serial number counter */
76628 + atomic_unchecked_t hi_serial; /* highest serial number received */
76629 u8 avail_calls; /* number of calls available */
76630 u8 size_align; /* data size alignment (for security) */
76631 u8 header_size; /* rxrpc + security header size */
76632 @@ -346,7 +346,7 @@ struct rxrpc_call {
76633 spinlock_t lock;
76634 rwlock_t state_lock; /* lock for state transition */
76635 atomic_t usage;
76636 - atomic_t sequence; /* Tx data packet sequence counter */
76637 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
76638 u32 abort_code; /* local/remote abort code */
76639 enum { /* current state of call */
76640 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
76641 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
76642 */
76643 extern atomic_t rxrpc_n_skbs;
76644 extern __be32 rxrpc_epoch;
76645 -extern atomic_t rxrpc_debug_id;
76646 +extern atomic_unchecked_t rxrpc_debug_id;
76647 extern struct workqueue_struct *rxrpc_workqueue;
76648
76649 /*
76650 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
76651 index 87f7135..74d3703 100644
76652 --- a/net/rxrpc/ar-local.c
76653 +++ b/net/rxrpc/ar-local.c
76654 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
76655 spin_lock_init(&local->lock);
76656 rwlock_init(&local->services_lock);
76657 atomic_set(&local->usage, 1);
76658 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
76659 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76660 memcpy(&local->srx, srx, sizeof(*srx));
76661 }
76662
76663 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
76664 index 338d793..47391d0 100644
76665 --- a/net/rxrpc/ar-output.c
76666 +++ b/net/rxrpc/ar-output.c
76667 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
76668 sp->hdr.cid = call->cid;
76669 sp->hdr.callNumber = call->call_id;
76670 sp->hdr.seq =
76671 - htonl(atomic_inc_return(&call->sequence));
76672 + htonl(atomic_inc_return_unchecked(&call->sequence));
76673 sp->hdr.serial =
76674 - htonl(atomic_inc_return(&conn->serial));
76675 + htonl(atomic_inc_return_unchecked(&conn->serial));
76676 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
76677 sp->hdr.userStatus = 0;
76678 sp->hdr.securityIndex = conn->security_ix;
76679 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
76680 index 2754f09..b20e38f 100644
76681 --- a/net/rxrpc/ar-peer.c
76682 +++ b/net/rxrpc/ar-peer.c
76683 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
76684 INIT_LIST_HEAD(&peer->error_targets);
76685 spin_lock_init(&peer->lock);
76686 atomic_set(&peer->usage, 1);
76687 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
76688 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76689 memcpy(&peer->srx, srx, sizeof(*srx));
76690
76691 rxrpc_assess_MTU_size(peer);
76692 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
76693 index 38047f7..9f48511 100644
76694 --- a/net/rxrpc/ar-proc.c
76695 +++ b/net/rxrpc/ar-proc.c
76696 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
76697 atomic_read(&conn->usage),
76698 rxrpc_conn_states[conn->state],
76699 key_serial(conn->key),
76700 - atomic_read(&conn->serial),
76701 - atomic_read(&conn->hi_serial));
76702 + atomic_read_unchecked(&conn->serial),
76703 + atomic_read_unchecked(&conn->hi_serial));
76704
76705 return 0;
76706 }
76707 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
76708 index 92df566..87ec1bf 100644
76709 --- a/net/rxrpc/ar-transport.c
76710 +++ b/net/rxrpc/ar-transport.c
76711 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
76712 spin_lock_init(&trans->client_lock);
76713 rwlock_init(&trans->conn_lock);
76714 atomic_set(&trans->usage, 1);
76715 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
76716 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
76717
76718 if (peer->srx.transport.family == AF_INET) {
76719 switch (peer->srx.transport_type) {
76720 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
76721 index 7635107..4670276 100644
76722 --- a/net/rxrpc/rxkad.c
76723 +++ b/net/rxrpc/rxkad.c
76724 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
76725
76726 len = iov[0].iov_len + iov[1].iov_len;
76727
76728 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
76729 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76730 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
76731
76732 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
76733 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
76734
76735 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
76736
76737 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
76738 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
76739 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
76740
76741 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
76742 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
76743 index 1e2eee8..ce3967e 100644
76744 --- a/net/sctp/proc.c
76745 +++ b/net/sctp/proc.c
76746 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
76747 seq_printf(seq,
76748 "%8pK %8pK %-3d %-3d %-2d %-4d "
76749 "%4d %8d %8d %7d %5lu %-5d %5d ",
76750 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
76751 + assoc, sk,
76752 + sctp_sk(sk)->type, sk->sk_state,
76753 assoc->state, hash,
76754 assoc->assoc_id,
76755 assoc->sndbuf_used,
76756 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
76757 index 54a7cd2..944edae 100644
76758 --- a/net/sctp/socket.c
76759 +++ b/net/sctp/socket.c
76760 @@ -4574,7 +4574,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
76761 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
76762 if (space_left < addrlen)
76763 return -ENOMEM;
76764 - if (copy_to_user(to, &temp, addrlen))
76765 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
76766 return -EFAULT;
76767 to += addrlen;
76768 cnt++;
76769 diff --git a/net/socket.c b/net/socket.c
76770 index 2dce67a..1e91168 100644
76771 --- a/net/socket.c
76772 +++ b/net/socket.c
76773 @@ -88,6 +88,7 @@
76774 #include <linux/nsproxy.h>
76775 #include <linux/magic.h>
76776 #include <linux/slab.h>
76777 +#include <linux/in.h>
76778
76779 #include <asm/uaccess.h>
76780 #include <asm/unistd.h>
76781 @@ -105,6 +106,8 @@
76782 #include <linux/sockios.h>
76783 #include <linux/atalk.h>
76784
76785 +#include <linux/grsock.h>
76786 +
76787 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
76788 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
76789 unsigned long nr_segs, loff_t pos);
76790 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
76791 &sockfs_dentry_operations, SOCKFS_MAGIC);
76792 }
76793
76794 -static struct vfsmount *sock_mnt __read_mostly;
76795 +struct vfsmount *sock_mnt __read_mostly;
76796
76797 static struct file_system_type sock_fs_type = {
76798 .name = "sockfs",
76799 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
76800 return -EAFNOSUPPORT;
76801 if (type < 0 || type >= SOCK_MAX)
76802 return -EINVAL;
76803 + if (protocol < 0)
76804 + return -EINVAL;
76805
76806 /* Compatibility.
76807
76808 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
76809 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
76810 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
76811
76812 + if(!gr_search_socket(family, type, protocol)) {
76813 + retval = -EACCES;
76814 + goto out;
76815 + }
76816 +
76817 + if (gr_handle_sock_all(family, type, protocol)) {
76818 + retval = -EACCES;
76819 + goto out;
76820 + }
76821 +
76822 retval = sock_create(family, type, protocol, &sock);
76823 if (retval < 0)
76824 goto out;
76825 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76826 if (sock) {
76827 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
76828 if (err >= 0) {
76829 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
76830 + err = -EACCES;
76831 + goto error;
76832 + }
76833 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
76834 + if (err)
76835 + goto error;
76836 +
76837 err = security_socket_bind(sock,
76838 (struct sockaddr *)&address,
76839 addrlen);
76840 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
76841 (struct sockaddr *)
76842 &address, addrlen);
76843 }
76844 +error:
76845 fput_light(sock->file, fput_needed);
76846 }
76847 return err;
76848 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
76849 if ((unsigned)backlog > somaxconn)
76850 backlog = somaxconn;
76851
76852 + if (gr_handle_sock_server_other(sock->sk)) {
76853 + err = -EPERM;
76854 + goto error;
76855 + }
76856 +
76857 + err = gr_search_listen(sock);
76858 + if (err)
76859 + goto error;
76860 +
76861 err = security_socket_listen(sock, backlog);
76862 if (!err)
76863 err = sock->ops->listen(sock, backlog);
76864
76865 +error:
76866 fput_light(sock->file, fput_needed);
76867 }
76868 return err;
76869 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76870 newsock->type = sock->type;
76871 newsock->ops = sock->ops;
76872
76873 + if (gr_handle_sock_server_other(sock->sk)) {
76874 + err = -EPERM;
76875 + sock_release(newsock);
76876 + goto out_put;
76877 + }
76878 +
76879 + err = gr_search_accept(sock);
76880 + if (err) {
76881 + sock_release(newsock);
76882 + goto out_put;
76883 + }
76884 +
76885 /*
76886 * We don't need try_module_get here, as the listening socket (sock)
76887 * has the protocol module (sock->ops->owner) held.
76888 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
76889 fd_install(newfd, newfile);
76890 err = newfd;
76891
76892 + gr_attach_curr_ip(newsock->sk);
76893 +
76894 out_put:
76895 fput_light(sock->file, fput_needed);
76896 out:
76897 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76898 int, addrlen)
76899 {
76900 struct socket *sock;
76901 + struct sockaddr *sck;
76902 struct sockaddr_storage address;
76903 int err, fput_needed;
76904
76905 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
76906 if (err < 0)
76907 goto out_put;
76908
76909 + sck = (struct sockaddr *)&address;
76910 +
76911 + if (gr_handle_sock_client(sck)) {
76912 + err = -EACCES;
76913 + goto out_put;
76914 + }
76915 +
76916 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
76917 + if (err)
76918 + goto out_put;
76919 +
76920 err =
76921 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
76922 if (err)
76923 @@ -1950,7 +2010,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
76924 * checking falls down on this.
76925 */
76926 if (copy_from_user(ctl_buf,
76927 - (void __user __force *)msg_sys->msg_control,
76928 + (void __force_user *)msg_sys->msg_control,
76929 ctl_len))
76930 goto out_freectl;
76931 msg_sys->msg_control = ctl_buf;
76932 @@ -2120,7 +2180,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
76933 * kernel msghdr to use the kernel address space)
76934 */
76935
76936 - uaddr = (__force void __user *)msg_sys->msg_name;
76937 + uaddr = (void __force_user *)msg_sys->msg_name;
76938 uaddr_len = COMPAT_NAMELEN(msg);
76939 if (MSG_CMSG_COMPAT & flags) {
76940 err = verify_compat_iovec(msg_sys, iov,
76941 @@ -2748,7 +2808,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76942 }
76943
76944 ifr = compat_alloc_user_space(buf_size);
76945 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
76946 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
76947
76948 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
76949 return -EFAULT;
76950 @@ -2772,12 +2832,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76951 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
76952
76953 if (copy_in_user(rxnfc, compat_rxnfc,
76954 - (void *)(&rxnfc->fs.m_ext + 1) -
76955 - (void *)rxnfc) ||
76956 + (void __user *)(&rxnfc->fs.m_ext + 1) -
76957 + (void __user *)rxnfc) ||
76958 copy_in_user(&rxnfc->fs.ring_cookie,
76959 &compat_rxnfc->fs.ring_cookie,
76960 - (void *)(&rxnfc->fs.location + 1) -
76961 - (void *)&rxnfc->fs.ring_cookie) ||
76962 + (void __user *)(&rxnfc->fs.location + 1) -
76963 + (void __user *)&rxnfc->fs.ring_cookie) ||
76964 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
76965 sizeof(rxnfc->rule_cnt)))
76966 return -EFAULT;
76967 @@ -2789,12 +2849,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
76968
76969 if (convert_out) {
76970 if (copy_in_user(compat_rxnfc, rxnfc,
76971 - (const void *)(&rxnfc->fs.m_ext + 1) -
76972 - (const void *)rxnfc) ||
76973 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
76974 + (const void __user *)rxnfc) ||
76975 copy_in_user(&compat_rxnfc->fs.ring_cookie,
76976 &rxnfc->fs.ring_cookie,
76977 - (const void *)(&rxnfc->fs.location + 1) -
76978 - (const void *)&rxnfc->fs.ring_cookie) ||
76979 + (const void __user *)(&rxnfc->fs.location + 1) -
76980 + (const void __user *)&rxnfc->fs.ring_cookie) ||
76981 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
76982 sizeof(rxnfc->rule_cnt)))
76983 return -EFAULT;
76984 @@ -2864,7 +2924,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
76985 old_fs = get_fs();
76986 set_fs(KERNEL_DS);
76987 err = dev_ioctl(net, cmd,
76988 - (struct ifreq __user __force *) &kifr);
76989 + (struct ifreq __force_user *) &kifr);
76990 set_fs(old_fs);
76991
76992 return err;
76993 @@ -2973,7 +3033,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
76994
76995 old_fs = get_fs();
76996 set_fs(KERNEL_DS);
76997 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
76998 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
76999 set_fs(old_fs);
77000
77001 if (cmd == SIOCGIFMAP && !err) {
77002 @@ -3078,7 +3138,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
77003 ret |= __get_user(rtdev, &(ur4->rt_dev));
77004 if (rtdev) {
77005 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
77006 - r4.rt_dev = (char __user __force *)devname;
77007 + r4.rt_dev = (char __force_user *)devname;
77008 devname[15] = 0;
77009 } else
77010 r4.rt_dev = NULL;
77011 @@ -3318,8 +3378,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
77012 int __user *uoptlen;
77013 int err;
77014
77015 - uoptval = (char __user __force *) optval;
77016 - uoptlen = (int __user __force *) optlen;
77017 + uoptval = (char __force_user *) optval;
77018 + uoptlen = (int __force_user *) optlen;
77019
77020 set_fs(KERNEL_DS);
77021 if (level == SOL_SOCKET)
77022 @@ -3339,7 +3399,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
77023 char __user *uoptval;
77024 int err;
77025
77026 - uoptval = (char __user __force *) optval;
77027 + uoptval = (char __force_user *) optval;
77028
77029 set_fs(KERNEL_DS);
77030 if (level == SOL_SOCKET)
77031 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
77032 index 00a1a2a..6a0138a 100644
77033 --- a/net/sunrpc/sched.c
77034 +++ b/net/sunrpc/sched.c
77035 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
77036 #ifdef RPC_DEBUG
77037 static void rpc_task_set_debuginfo(struct rpc_task *task)
77038 {
77039 - static atomic_t rpc_pid;
77040 + static atomic_unchecked_t rpc_pid;
77041
77042 - task->tk_pid = atomic_inc_return(&rpc_pid);
77043 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
77044 }
77045 #else
77046 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
77047 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
77048 index 71bed1c..5dff36d 100644
77049 --- a/net/sunrpc/svcsock.c
77050 +++ b/net/sunrpc/svcsock.c
77051 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
77052 int buflen, unsigned int base)
77053 {
77054 size_t save_iovlen;
77055 - void __user *save_iovbase;
77056 + void *save_iovbase;
77057 unsigned int i;
77058 int ret;
77059
77060 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
77061 index 09af4fa..77110a9 100644
77062 --- a/net/sunrpc/xprtrdma/svc_rdma.c
77063 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
77064 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
77065 static unsigned int min_max_inline = 4096;
77066 static unsigned int max_max_inline = 65536;
77067
77068 -atomic_t rdma_stat_recv;
77069 -atomic_t rdma_stat_read;
77070 -atomic_t rdma_stat_write;
77071 -atomic_t rdma_stat_sq_starve;
77072 -atomic_t rdma_stat_rq_starve;
77073 -atomic_t rdma_stat_rq_poll;
77074 -atomic_t rdma_stat_rq_prod;
77075 -atomic_t rdma_stat_sq_poll;
77076 -atomic_t rdma_stat_sq_prod;
77077 +atomic_unchecked_t rdma_stat_recv;
77078 +atomic_unchecked_t rdma_stat_read;
77079 +atomic_unchecked_t rdma_stat_write;
77080 +atomic_unchecked_t rdma_stat_sq_starve;
77081 +atomic_unchecked_t rdma_stat_rq_starve;
77082 +atomic_unchecked_t rdma_stat_rq_poll;
77083 +atomic_unchecked_t rdma_stat_rq_prod;
77084 +atomic_unchecked_t rdma_stat_sq_poll;
77085 +atomic_unchecked_t rdma_stat_sq_prod;
77086
77087 /* Temporary NFS request map and context caches */
77088 struct kmem_cache *svc_rdma_map_cachep;
77089 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
77090 len -= *ppos;
77091 if (len > *lenp)
77092 len = *lenp;
77093 - if (len && copy_to_user(buffer, str_buf, len))
77094 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
77095 return -EFAULT;
77096 *lenp = len;
77097 *ppos += len;
77098 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
77099 {
77100 .procname = "rdma_stat_read",
77101 .data = &rdma_stat_read,
77102 - .maxlen = sizeof(atomic_t),
77103 + .maxlen = sizeof(atomic_unchecked_t),
77104 .mode = 0644,
77105 .proc_handler = read_reset_stat,
77106 },
77107 {
77108 .procname = "rdma_stat_recv",
77109 .data = &rdma_stat_recv,
77110 - .maxlen = sizeof(atomic_t),
77111 + .maxlen = sizeof(atomic_unchecked_t),
77112 .mode = 0644,
77113 .proc_handler = read_reset_stat,
77114 },
77115 {
77116 .procname = "rdma_stat_write",
77117 .data = &rdma_stat_write,
77118 - .maxlen = sizeof(atomic_t),
77119 + .maxlen = sizeof(atomic_unchecked_t),
77120 .mode = 0644,
77121 .proc_handler = read_reset_stat,
77122 },
77123 {
77124 .procname = "rdma_stat_sq_starve",
77125 .data = &rdma_stat_sq_starve,
77126 - .maxlen = sizeof(atomic_t),
77127 + .maxlen = sizeof(atomic_unchecked_t),
77128 .mode = 0644,
77129 .proc_handler = read_reset_stat,
77130 },
77131 {
77132 .procname = "rdma_stat_rq_starve",
77133 .data = &rdma_stat_rq_starve,
77134 - .maxlen = sizeof(atomic_t),
77135 + .maxlen = sizeof(atomic_unchecked_t),
77136 .mode = 0644,
77137 .proc_handler = read_reset_stat,
77138 },
77139 {
77140 .procname = "rdma_stat_rq_poll",
77141 .data = &rdma_stat_rq_poll,
77142 - .maxlen = sizeof(atomic_t),
77143 + .maxlen = sizeof(atomic_unchecked_t),
77144 .mode = 0644,
77145 .proc_handler = read_reset_stat,
77146 },
77147 {
77148 .procname = "rdma_stat_rq_prod",
77149 .data = &rdma_stat_rq_prod,
77150 - .maxlen = sizeof(atomic_t),
77151 + .maxlen = sizeof(atomic_unchecked_t),
77152 .mode = 0644,
77153 .proc_handler = read_reset_stat,
77154 },
77155 {
77156 .procname = "rdma_stat_sq_poll",
77157 .data = &rdma_stat_sq_poll,
77158 - .maxlen = sizeof(atomic_t),
77159 + .maxlen = sizeof(atomic_unchecked_t),
77160 .mode = 0644,
77161 .proc_handler = read_reset_stat,
77162 },
77163 {
77164 .procname = "rdma_stat_sq_prod",
77165 .data = &rdma_stat_sq_prod,
77166 - .maxlen = sizeof(atomic_t),
77167 + .maxlen = sizeof(atomic_unchecked_t),
77168 .mode = 0644,
77169 .proc_handler = read_reset_stat,
77170 },
77171 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77172 index df67211..c354b13 100644
77173 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77174 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77175 @@ -499,7 +499,7 @@ next_sge:
77176 svc_rdma_put_context(ctxt, 0);
77177 goto out;
77178 }
77179 - atomic_inc(&rdma_stat_read);
77180 + atomic_inc_unchecked(&rdma_stat_read);
77181
77182 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
77183 chl_map->ch[ch_no].count -= read_wr.num_sge;
77184 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77185 dto_q);
77186 list_del_init(&ctxt->dto_q);
77187 } else {
77188 - atomic_inc(&rdma_stat_rq_starve);
77189 + atomic_inc_unchecked(&rdma_stat_rq_starve);
77190 clear_bit(XPT_DATA, &xprt->xpt_flags);
77191 ctxt = NULL;
77192 }
77193 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77194 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
77195 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
77196 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
77197 - atomic_inc(&rdma_stat_recv);
77198 + atomic_inc_unchecked(&rdma_stat_recv);
77199
77200 /* Build up the XDR from the receive buffers. */
77201 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
77202 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77203 index 249a835..fb2794b 100644
77204 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77205 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77206 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
77207 write_wr.wr.rdma.remote_addr = to;
77208
77209 /* Post It */
77210 - atomic_inc(&rdma_stat_write);
77211 + atomic_inc_unchecked(&rdma_stat_write);
77212 if (svc_rdma_send(xprt, &write_wr))
77213 goto err;
77214 return 0;
77215 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77216 index ba1296d..0fec1a5 100644
77217 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
77218 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77219 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77220 return;
77221
77222 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
77223 - atomic_inc(&rdma_stat_rq_poll);
77224 + atomic_inc_unchecked(&rdma_stat_rq_poll);
77225
77226 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
77227 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
77228 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77229 }
77230
77231 if (ctxt)
77232 - atomic_inc(&rdma_stat_rq_prod);
77233 + atomic_inc_unchecked(&rdma_stat_rq_prod);
77234
77235 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
77236 /*
77237 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77238 return;
77239
77240 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
77241 - atomic_inc(&rdma_stat_sq_poll);
77242 + atomic_inc_unchecked(&rdma_stat_sq_poll);
77243 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
77244 if (wc.status != IB_WC_SUCCESS)
77245 /* Close the transport */
77246 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77247 }
77248
77249 if (ctxt)
77250 - atomic_inc(&rdma_stat_sq_prod);
77251 + atomic_inc_unchecked(&rdma_stat_sq_prod);
77252 }
77253
77254 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
77255 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
77256 spin_lock_bh(&xprt->sc_lock);
77257 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
77258 spin_unlock_bh(&xprt->sc_lock);
77259 - atomic_inc(&rdma_stat_sq_starve);
77260 + atomic_inc_unchecked(&rdma_stat_sq_starve);
77261
77262 /* See if we can opportunistically reap SQ WR to make room */
77263 sq_cq_reap(xprt);
77264 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
77265 index e758139..d29ea47 100644
77266 --- a/net/sysctl_net.c
77267 +++ b/net/sysctl_net.c
77268 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
77269 struct ctl_table *table)
77270 {
77271 /* Allow network administrator to have same access as root. */
77272 - if (capable(CAP_NET_ADMIN)) {
77273 + if (capable_nolog(CAP_NET_ADMIN)) {
77274 int mode = (table->mode >> 6) & 7;
77275 return (mode << 6) | (mode << 3) | mode;
77276 }
77277 diff --git a/net/tipc/link.c b/net/tipc/link.c
77278 index ae98a72..7bb6056 100644
77279 --- a/net/tipc/link.c
77280 +++ b/net/tipc/link.c
77281 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
77282 struct tipc_msg fragm_hdr;
77283 struct sk_buff *buf, *buf_chain, *prev;
77284 u32 fragm_crs, fragm_rest, hsz, sect_rest;
77285 - const unchar *sect_crs;
77286 + const unchar __user *sect_crs;
77287 int curr_sect;
77288 u32 fragm_no;
77289
77290 @@ -1247,7 +1247,7 @@ again:
77291
77292 if (!sect_rest) {
77293 sect_rest = msg_sect[++curr_sect].iov_len;
77294 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
77295 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
77296 }
77297
77298 if (sect_rest < fragm_rest)
77299 @@ -1266,7 +1266,7 @@ error:
77300 }
77301 } else
77302 skb_copy_to_linear_data_offset(buf, fragm_crs,
77303 - sect_crs, sz);
77304 + (const void __force_kernel *)sect_crs, sz);
77305 sect_crs += sz;
77306 sect_rest -= sz;
77307 fragm_crs += sz;
77308 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
77309 index 83d5096..dcba497 100644
77310 --- a/net/tipc/msg.c
77311 +++ b/net/tipc/msg.c
77312 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77313 msg_sect[cnt].iov_len);
77314 else
77315 skb_copy_to_linear_data_offset(*buf, pos,
77316 - msg_sect[cnt].iov_base,
77317 + (const void __force_kernel *)msg_sect[cnt].iov_base,
77318 msg_sect[cnt].iov_len);
77319 pos += msg_sect[cnt].iov_len;
77320 }
77321 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
77322 index 1983717..4d6102c 100644
77323 --- a/net/tipc/subscr.c
77324 +++ b/net/tipc/subscr.c
77325 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
77326 {
77327 struct iovec msg_sect;
77328
77329 - msg_sect.iov_base = (void *)&sub->evt;
77330 + msg_sect.iov_base = (void __force_user *)&sub->evt;
77331 msg_sect.iov_len = sizeof(struct tipc_event);
77332
77333 sub->evt.event = htohl(event, sub->swap);
77334 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
77335 index d99678a..3514a21 100644
77336 --- a/net/unix/af_unix.c
77337 +++ b/net/unix/af_unix.c
77338 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
77339 err = -ECONNREFUSED;
77340 if (!S_ISSOCK(inode->i_mode))
77341 goto put_fail;
77342 +
77343 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
77344 + err = -EACCES;
77345 + goto put_fail;
77346 + }
77347 +
77348 u = unix_find_socket_byinode(inode);
77349 if (!u)
77350 goto put_fail;
77351 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
77352 if (u) {
77353 struct dentry *dentry;
77354 dentry = unix_sk(u)->dentry;
77355 +
77356 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
77357 + err = -EPERM;
77358 + sock_put(u);
77359 + goto fail;
77360 + }
77361 +
77362 if (dentry)
77363 touch_atime(unix_sk(u)->mnt, dentry);
77364 } else
77365 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
77366 err = security_path_mknod(&path, dentry, mode, 0);
77367 if (err)
77368 goto out_mknod_drop_write;
77369 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
77370 + err = -EACCES;
77371 + goto out_mknod_drop_write;
77372 + }
77373 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
77374 out_mknod_drop_write:
77375 mnt_drop_write(path.mnt);
77376 if (err)
77377 goto out_mknod_dput;
77378 +
77379 + gr_handle_create(dentry, path.mnt);
77380 +
77381 mutex_unlock(&path.dentry->d_inode->i_mutex);
77382 dput(path.dentry);
77383 path.dentry = dentry;
77384 diff --git a/net/wireless/core.h b/net/wireless/core.h
77385 index b9ec306..b4a563e 100644
77386 --- a/net/wireless/core.h
77387 +++ b/net/wireless/core.h
77388 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
77389 struct mutex mtx;
77390
77391 /* rfkill support */
77392 - struct rfkill_ops rfkill_ops;
77393 + rfkill_ops_no_const rfkill_ops;
77394 struct rfkill *rfkill;
77395 struct work_struct rfkill_sync;
77396
77397 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
77398 index 0af7f54..c916d2f 100644
77399 --- a/net/wireless/wext-core.c
77400 +++ b/net/wireless/wext-core.c
77401 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77402 */
77403
77404 /* Support for very large requests */
77405 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
77406 - (user_length > descr->max_tokens)) {
77407 + if (user_length > descr->max_tokens) {
77408 /* Allow userspace to GET more than max so
77409 * we can support any size GET requests.
77410 * There is still a limit : -ENOMEM.
77411 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77412 }
77413 }
77414
77415 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
77416 - /*
77417 - * If this is a GET, but not NOMAX, it means that the extra
77418 - * data is not bounded by userspace, but by max_tokens. Thus
77419 - * set the length to max_tokens. This matches the extra data
77420 - * allocation.
77421 - * The driver should fill it with the number of tokens it
77422 - * provided, and it may check iwp->length rather than having
77423 - * knowledge of max_tokens. If the driver doesn't change the
77424 - * iwp->length, this ioctl just copies back max_token tokens
77425 - * filled with zeroes. Hopefully the driver isn't claiming
77426 - * them to be valid data.
77427 - */
77428 - iwp->length = descr->max_tokens;
77429 - }
77430 -
77431 err = handler(dev, info, (union iwreq_data *) iwp, extra);
77432
77433 iwp->length += essid_compat;
77434 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
77435 index 9049a5c..cfa6f5c 100644
77436 --- a/net/xfrm/xfrm_policy.c
77437 +++ b/net/xfrm/xfrm_policy.c
77438 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
77439 {
77440 policy->walk.dead = 1;
77441
77442 - atomic_inc(&policy->genid);
77443 + atomic_inc_unchecked(&policy->genid);
77444
77445 if (del_timer(&policy->timer))
77446 xfrm_pol_put(policy);
77447 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
77448 hlist_add_head(&policy->bydst, chain);
77449 xfrm_pol_hold(policy);
77450 net->xfrm.policy_count[dir]++;
77451 - atomic_inc(&flow_cache_genid);
77452 + atomic_inc_unchecked(&flow_cache_genid);
77453 if (delpol)
77454 __xfrm_policy_unlink(delpol, dir);
77455 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
77456 @@ -1530,7 +1530,7 @@ free_dst:
77457 goto out;
77458 }
77459
77460 -static int inline
77461 +static inline int
77462 xfrm_dst_alloc_copy(void **target, const void *src, int size)
77463 {
77464 if (!*target) {
77465 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
77466 return 0;
77467 }
77468
77469 -static int inline
77470 +static inline int
77471 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77472 {
77473 #ifdef CONFIG_XFRM_SUB_POLICY
77474 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
77475 #endif
77476 }
77477
77478 -static int inline
77479 +static inline int
77480 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
77481 {
77482 #ifdef CONFIG_XFRM_SUB_POLICY
77483 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
77484
77485 xdst->num_pols = num_pols;
77486 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
77487 - xdst->policy_genid = atomic_read(&pols[0]->genid);
77488 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
77489
77490 return xdst;
77491 }
77492 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
77493 if (xdst->xfrm_genid != dst->xfrm->genid)
77494 return 0;
77495 if (xdst->num_pols > 0 &&
77496 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
77497 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
77498 return 0;
77499
77500 mtu = dst_mtu(dst->child);
77501 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
77502 sizeof(pol->xfrm_vec[i].saddr));
77503 pol->xfrm_vec[i].encap_family = mp->new_family;
77504 /* flush bundles */
77505 - atomic_inc(&pol->genid);
77506 + atomic_inc_unchecked(&pol->genid);
77507 }
77508 }
77509
77510 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
77511 index d2b366c..51ff91ebc 100644
77512 --- a/scripts/Makefile.build
77513 +++ b/scripts/Makefile.build
77514 @@ -109,7 +109,7 @@ endif
77515 endif
77516
77517 # Do not include host rules unless needed
77518 -ifneq ($(hostprogs-y)$(hostprogs-m),)
77519 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
77520 include scripts/Makefile.host
77521 endif
77522
77523 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
77524 index 686cb0d..9d653bf 100644
77525 --- a/scripts/Makefile.clean
77526 +++ b/scripts/Makefile.clean
77527 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
77528 __clean-files := $(extra-y) $(always) \
77529 $(targets) $(clean-files) \
77530 $(host-progs) \
77531 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
77532 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
77533 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
77534
77535 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
77536
77537 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
77538 index 1ac414f..a1c1451 100644
77539 --- a/scripts/Makefile.host
77540 +++ b/scripts/Makefile.host
77541 @@ -31,6 +31,7 @@
77542 # Note: Shared libraries consisting of C++ files are not supported
77543
77544 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
77545 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
77546
77547 # C code
77548 # Executables compiled from a single .c file
77549 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
77550 # Shared libaries (only .c supported)
77551 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
77552 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
77553 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
77554 # Remove .so files from "xxx-objs"
77555 host-cobjs := $(filter-out %.so,$(host-cobjs))
77556
77557 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
77558 index cb1f50c..cef2a7c 100644
77559 --- a/scripts/basic/fixdep.c
77560 +++ b/scripts/basic/fixdep.c
77561 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
77562 /*
77563 * Lookup a value in the configuration string.
77564 */
77565 -static int is_defined_config(const char *name, int len, unsigned int hash)
77566 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
77567 {
77568 struct item *aux;
77569
77570 @@ -211,10 +211,10 @@ static void clear_config(void)
77571 /*
77572 * Record the use of a CONFIG_* word.
77573 */
77574 -static void use_config(const char *m, int slen)
77575 +static void use_config(const char *m, unsigned int slen)
77576 {
77577 unsigned int hash = strhash(m, slen);
77578 - int c, i;
77579 + unsigned int c, i;
77580
77581 if (is_defined_config(m, slen, hash))
77582 return;
77583 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
77584
77585 static void parse_config_file(const char *map, size_t len)
77586 {
77587 - const int *end = (const int *) (map + len);
77588 + const unsigned int *end = (const unsigned int *) (map + len);
77589 /* start at +1, so that p can never be < map */
77590 - const int *m = (const int *) map + 1;
77591 + const unsigned int *m = (const unsigned int *) map + 1;
77592 const char *p, *q;
77593
77594 for (; m < end; m++) {
77595 @@ -406,7 +406,7 @@ static void print_deps(void)
77596 static void traps(void)
77597 {
77598 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
77599 - int *p = (int *)test;
77600 + unsigned int *p = (unsigned int *)test;
77601
77602 if (*p != INT_CONF) {
77603 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
77604 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
77605 new file mode 100644
77606 index 0000000..8729101
77607 --- /dev/null
77608 +++ b/scripts/gcc-plugin.sh
77609 @@ -0,0 +1,2 @@
77610 +#!/bin/sh
77611 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
77612 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
77613 index f936d1f..a66d95f 100644
77614 --- a/scripts/mod/file2alias.c
77615 +++ b/scripts/mod/file2alias.c
77616 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
77617 unsigned long size, unsigned long id_size,
77618 void *symval)
77619 {
77620 - int i;
77621 + unsigned int i;
77622
77623 if (size % id_size || size < id_size) {
77624 if (cross_build != 0)
77625 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
77626 /* USB is special because the bcdDevice can be matched against a numeric range */
77627 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
77628 static void do_usb_entry(struct usb_device_id *id,
77629 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
77630 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
77631 unsigned char range_lo, unsigned char range_hi,
77632 unsigned char max, struct module *mod)
77633 {
77634 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
77635 {
77636 unsigned int devlo, devhi;
77637 unsigned char chi, clo, max;
77638 - int ndigits;
77639 + unsigned int ndigits;
77640
77641 id->match_flags = TO_NATIVE(id->match_flags);
77642 id->idVendor = TO_NATIVE(id->idVendor);
77643 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
77644 for (i = 0; i < count; i++) {
77645 const char *id = (char *)devs[i].id;
77646 char acpi_id[sizeof(devs[0].id)];
77647 - int j;
77648 + unsigned int j;
77649
77650 buf_printf(&mod->dev_table_buf,
77651 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77652 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77653
77654 for (j = 0; j < PNP_MAX_DEVICES; j++) {
77655 const char *id = (char *)card->devs[j].id;
77656 - int i2, j2;
77657 + unsigned int i2, j2;
77658 int dup = 0;
77659
77660 if (!id[0])
77661 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
77662 /* add an individual alias for every device entry */
77663 if (!dup) {
77664 char acpi_id[sizeof(card->devs[0].id)];
77665 - int k;
77666 + unsigned int k;
77667
77668 buf_printf(&mod->dev_table_buf,
77669 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
77670 @@ -807,7 +807,7 @@ static void dmi_ascii_filter(char *d, const char *s)
77671 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
77672 char *alias)
77673 {
77674 - int i, j;
77675 + unsigned int i, j;
77676
77677 sprintf(alias, "dmi*");
77678
77679 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
77680 index 2bd594e..d43245e 100644
77681 --- a/scripts/mod/modpost.c
77682 +++ b/scripts/mod/modpost.c
77683 @@ -919,6 +919,7 @@ enum mismatch {
77684 ANY_INIT_TO_ANY_EXIT,
77685 ANY_EXIT_TO_ANY_INIT,
77686 EXPORT_TO_INIT_EXIT,
77687 + DATA_TO_TEXT
77688 };
77689
77690 struct sectioncheck {
77691 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
77692 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
77693 .mismatch = EXPORT_TO_INIT_EXIT,
77694 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
77695 +},
77696 +/* Do not reference code from writable data */
77697 +{
77698 + .fromsec = { DATA_SECTIONS, NULL },
77699 + .tosec = { TEXT_SECTIONS, NULL },
77700 + .mismatch = DATA_TO_TEXT
77701 }
77702 };
77703
77704 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
77705 continue;
77706 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
77707 continue;
77708 - if (sym->st_value == addr)
77709 - return sym;
77710 /* Find a symbol nearby - addr are maybe negative */
77711 d = sym->st_value - addr;
77712 + if (d == 0)
77713 + return sym;
77714 if (d < 0)
77715 d = addr - sym->st_value;
77716 if (d < distance) {
77717 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
77718 tosym, prl_to, prl_to, tosym);
77719 free(prl_to);
77720 break;
77721 + case DATA_TO_TEXT:
77722 +/*
77723 + fprintf(stderr,
77724 + "The variable %s references\n"
77725 + "the %s %s%s%s\n",
77726 + fromsym, to, sec2annotation(tosec), tosym, to_p);
77727 +*/
77728 + break;
77729 }
77730 fprintf(stderr, "\n");
77731 }
77732 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
77733 static void check_sec_ref(struct module *mod, const char *modname,
77734 struct elf_info *elf)
77735 {
77736 - int i;
77737 + unsigned int i;
77738 Elf_Shdr *sechdrs = elf->sechdrs;
77739
77740 /* Walk through all sections */
77741 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
77742 va_end(ap);
77743 }
77744
77745 -void buf_write(struct buffer *buf, const char *s, int len)
77746 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
77747 {
77748 if (buf->size - buf->pos < len) {
77749 buf->size += len + SZ;
77750 @@ -1972,7 +1987,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
77751 if (fstat(fileno(file), &st) < 0)
77752 goto close_write;
77753
77754 - if (st.st_size != b->pos)
77755 + if (st.st_size != (off_t)b->pos)
77756 goto close_write;
77757
77758 tmp = NOFAIL(malloc(b->pos));
77759 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
77760 index 2031119..b5433af 100644
77761 --- a/scripts/mod/modpost.h
77762 +++ b/scripts/mod/modpost.h
77763 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
77764
77765 struct buffer {
77766 char *p;
77767 - int pos;
77768 - int size;
77769 + unsigned int pos;
77770 + unsigned int size;
77771 };
77772
77773 void __attribute__((format(printf, 2, 3)))
77774 buf_printf(struct buffer *buf, const char *fmt, ...);
77775
77776 void
77777 -buf_write(struct buffer *buf, const char *s, int len);
77778 +buf_write(struct buffer *buf, const char *s, unsigned int len);
77779
77780 struct module {
77781 struct module *next;
77782 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
77783 index 9dfcd6d..099068e 100644
77784 --- a/scripts/mod/sumversion.c
77785 +++ b/scripts/mod/sumversion.c
77786 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
77787 goto out;
77788 }
77789
77790 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
77791 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
77792 warn("writing sum in %s failed: %s\n",
77793 filename, strerror(errno));
77794 goto out;
77795 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
77796 index 5c11312..72742b5 100644
77797 --- a/scripts/pnmtologo.c
77798 +++ b/scripts/pnmtologo.c
77799 @@ -237,14 +237,14 @@ static void write_header(void)
77800 fprintf(out, " * Linux logo %s\n", logoname);
77801 fputs(" */\n\n", out);
77802 fputs("#include <linux/linux_logo.h>\n\n", out);
77803 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
77804 + fprintf(out, "static unsigned char %s_data[] = {\n",
77805 logoname);
77806 }
77807
77808 static void write_footer(void)
77809 {
77810 fputs("\n};\n\n", out);
77811 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
77812 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
77813 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
77814 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
77815 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
77816 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
77817 fputs("\n};\n\n", out);
77818
77819 /* write logo clut */
77820 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
77821 + fprintf(out, "static unsigned char %s_clut[] = {\n",
77822 logoname);
77823 write_hex_cnt = 0;
77824 for (i = 0; i < logo_clutsize; i++) {
77825 diff --git a/security/Kconfig b/security/Kconfig
77826 index 51bd5a0..3a4ebd0 100644
77827 --- a/security/Kconfig
77828 +++ b/security/Kconfig
77829 @@ -4,6 +4,627 @@
77830
77831 menu "Security options"
77832
77833 +source grsecurity/Kconfig
77834 +
77835 +menu "PaX"
77836 +
77837 + config ARCH_TRACK_EXEC_LIMIT
77838 + bool
77839 +
77840 + config PAX_KERNEXEC_PLUGIN
77841 + bool
77842 +
77843 + config PAX_PER_CPU_PGD
77844 + bool
77845 +
77846 + config TASK_SIZE_MAX_SHIFT
77847 + int
77848 + depends on X86_64
77849 + default 47 if !PAX_PER_CPU_PGD
77850 + default 42 if PAX_PER_CPU_PGD
77851 +
77852 + config PAX_ENABLE_PAE
77853 + bool
77854 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
77855 +
77856 +config PAX
77857 + bool "Enable various PaX features"
77858 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
77859 + help
77860 + This allows you to enable various PaX features. PaX adds
77861 + intrusion prevention mechanisms to the kernel that reduce
77862 + the risks posed by exploitable memory corruption bugs.
77863 +
77864 +menu "PaX Control"
77865 + depends on PAX
77866 +
77867 +config PAX_SOFTMODE
77868 + bool 'Support soft mode'
77869 + help
77870 + Enabling this option will allow you to run PaX in soft mode, that
77871 + is, PaX features will not be enforced by default, only on executables
77872 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
77873 + support as they are the only way to mark executables for soft mode use.
77874 +
77875 + Soft mode can be activated by using the "pax_softmode=1" kernel command
77876 + line option on boot. Furthermore you can control various PaX features
77877 + at runtime via the entries in /proc/sys/kernel/pax.
77878 +
77879 +config PAX_EI_PAX
77880 + bool 'Use legacy ELF header marking'
77881 + help
77882 + Enabling this option will allow you to control PaX features on
77883 + a per executable basis via the 'chpax' utility available at
77884 + http://pax.grsecurity.net/. The control flags will be read from
77885 + an otherwise reserved part of the ELF header. This marking has
77886 + numerous drawbacks (no support for soft-mode, toolchain does not
77887 + know about the non-standard use of the ELF header) therefore it
77888 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
77889 + support.
77890 +
77891 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77892 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
77893 + option otherwise they will not get any protection.
77894 +
77895 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
77896 + support as well, they will override the legacy EI_PAX marks.
77897 +
77898 +config PAX_PT_PAX_FLAGS
77899 + bool 'Use ELF program header marking'
77900 + help
77901 + Enabling this option will allow you to control PaX features on
77902 + a per executable basis via the 'paxctl' utility available at
77903 + http://pax.grsecurity.net/. The control flags will be read from
77904 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
77905 + has the benefits of supporting both soft mode and being fully
77906 + integrated into the toolchain (the binutils patch is available
77907 + from http://pax.grsecurity.net).
77908 +
77909 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77910 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77911 + support otherwise they will not get any protection.
77912 +
77913 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77914 + must make sure that the marks are the same if a binary has both marks.
77915 +
77916 + Note that if you enable the legacy EI_PAX marking support as well,
77917 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
77918 +
77919 +config PAX_XATTR_PAX_FLAGS
77920 + bool 'Use filesystem extended attributes marking'
77921 + depends on EXPERT
77922 + select CIFS_XATTR if CIFS
77923 + select EXT2_FS_XATTR if EXT2_FS
77924 + select EXT3_FS_XATTR if EXT3_FS
77925 + select EXT4_FS_XATTR if EXT4_FS
77926 + select JFFS2_FS_XATTR if JFFS2_FS
77927 + select REISERFS_FS_XATTR if REISERFS_FS
77928 + select SQUASHFS_XATTR if SQUASHFS
77929 + select TMPFS_XATTR if TMPFS
77930 + select UBIFS_FS_XATTR if UBIFS_FS
77931 + help
77932 + Enabling this option will allow you to control PaX features on
77933 + a per executable basis via the 'setfattr' utility. The control
77934 + flags will be read from the user.pax.flags extended attribute of
77935 + the file. This marking has the benefit of supporting binary-only
77936 + applications that self-check themselves (e.g., skype) and would
77937 + not tolerate chpax/paxctl changes. The main drawback is that
77938 + extended attributes are not supported by some filesystems (e.g.,
77939 + isofs, udf, vfat) so copying files through such filesystems will
77940 + lose the extended attributes and these PaX markings.
77941 +
77942 + If you have applications not marked by the PT_PAX_FLAGS ELF program
77943 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
77944 + support otherwise they will not get any protection.
77945 +
77946 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
77947 + must make sure that the marks are the same if a binary has both marks.
77948 +
77949 + Note that if you enable the legacy EI_PAX marking support as well,
77950 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
77951 +
77952 +choice
77953 + prompt 'MAC system integration'
77954 + default PAX_HAVE_ACL_FLAGS
77955 + help
77956 + Mandatory Access Control systems have the option of controlling
77957 + PaX flags on a per executable basis, choose the method supported
77958 + by your particular system.
77959 +
77960 + - "none": if your MAC system does not interact with PaX,
77961 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
77962 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
77963 +
77964 + NOTE: this option is for developers/integrators only.
77965 +
77966 + config PAX_NO_ACL_FLAGS
77967 + bool 'none'
77968 +
77969 + config PAX_HAVE_ACL_FLAGS
77970 + bool 'direct'
77971 +
77972 + config PAX_HOOK_ACL_FLAGS
77973 + bool 'hook'
77974 +endchoice
77975 +
77976 +endmenu
77977 +
77978 +menu "Non-executable pages"
77979 + depends on PAX
77980 +
77981 +config PAX_NOEXEC
77982 + bool "Enforce non-executable pages"
77983 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
77984 + help
77985 + By design some architectures do not allow for protecting memory
77986 + pages against execution or even if they do, Linux does not make
77987 + use of this feature. In practice this means that if a page is
77988 + readable (such as the stack or heap) it is also executable.
77989 +
77990 + There is a well known exploit technique that makes use of this
77991 + fact and a common programming mistake where an attacker can
77992 + introduce code of his choice somewhere in the attacked program's
77993 + memory (typically the stack or the heap) and then execute it.
77994 +
77995 + If the attacked program was running with different (typically
77996 + higher) privileges than that of the attacker, then he can elevate
77997 + his own privilege level (e.g. get a root shell, write to files for
77998 + which he does not have write access to, etc).
77999 +
78000 + Enabling this option will let you choose from various features
78001 + that prevent the injection and execution of 'foreign' code in
78002 + a program.
78003 +
78004 + This will also break programs that rely on the old behaviour and
78005 + expect that dynamically allocated memory via the malloc() family
78006 + of functions is executable (which it is not). Notable examples
78007 + are the XFree86 4.x server, the java runtime and wine.
78008 +
78009 +config PAX_PAGEEXEC
78010 + bool "Paging based non-executable pages"
78011 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
78012 + select S390_SWITCH_AMODE if S390
78013 + select S390_EXEC_PROTECT if S390
78014 + select ARCH_TRACK_EXEC_LIMIT if X86_32
78015 + help
78016 + This implementation is based on the paging feature of the CPU.
78017 + On i386 without hardware non-executable bit support there is a
78018 + variable but usually low performance impact, however on Intel's
78019 + P4 core based CPUs it is very high so you should not enable this
78020 + for kernels meant to be used on such CPUs.
78021 +
78022 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
78023 + with hardware non-executable bit support there is no performance
78024 + impact, on ppc the impact is negligible.
78025 +
78026 + Note that several architectures require various emulations due to
78027 + badly designed userland ABIs, this will cause a performance impact
78028 + but will disappear as soon as userland is fixed. For example, ppc
78029 + userland MUST have been built with secure-plt by a recent toolchain.
78030 +
78031 +config PAX_SEGMEXEC
78032 + bool "Segmentation based non-executable pages"
78033 + depends on PAX_NOEXEC && X86_32
78034 + help
78035 + This implementation is based on the segmentation feature of the
78036 + CPU and has a very small performance impact, however applications
78037 + will be limited to a 1.5 GB address space instead of the normal
78038 + 3 GB.
78039 +
78040 +config PAX_EMUTRAMP
78041 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
78042 + default y if PARISC
78043 + help
78044 + There are some programs and libraries that for one reason or
78045 + another attempt to execute special small code snippets from
78046 + non-executable memory pages. Most notable examples are the
78047 + signal handler return code generated by the kernel itself and
78048 + the GCC trampolines.
78049 +
78050 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
78051 + such programs will no longer work under your kernel.
78052 +
78053 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
78054 + utilities to enable trampoline emulation for the affected programs
78055 + yet still have the protection provided by the non-executable pages.
78056 +
78057 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
78058 + your system will not even boot.
78059 +
78060 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
78061 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
78062 + for the affected files.
78063 +
78064 + NOTE: enabling this feature *may* open up a loophole in the
78065 + protection provided by non-executable pages that an attacker
78066 + could abuse. Therefore the best solution is to not have any
78067 + files on your system that would require this option. This can
78068 + be achieved by not using libc5 (which relies on the kernel
78069 + signal handler return code) and not using or rewriting programs
78070 + that make use of the nested function implementation of GCC.
78071 + Skilled users can just fix GCC itself so that it implements
78072 + nested function calls in a way that does not interfere with PaX.
78073 +
78074 +config PAX_EMUSIGRT
78075 + bool "Automatically emulate sigreturn trampolines"
78076 + depends on PAX_EMUTRAMP && PARISC
78077 + default y
78078 + help
78079 + Enabling this option will have the kernel automatically detect
78080 + and emulate signal return trampolines executing on the stack
78081 + that would otherwise lead to task termination.
78082 +
78083 + This solution is intended as a temporary one for users with
78084 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
78085 + Modula-3 runtime, etc) or executables linked to such, basically
78086 + everything that does not specify its own SA_RESTORER function in
78087 + normal executable memory like glibc 2.1+ does.
78088 +
78089 + On parisc you MUST enable this option, otherwise your system will
78090 + not even boot.
78091 +
78092 + NOTE: this feature cannot be disabled on a per executable basis
78093 + and since it *does* open up a loophole in the protection provided
78094 + by non-executable pages, the best solution is to not have any
78095 + files on your system that would require this option.
78096 +
78097 +config PAX_MPROTECT
78098 + bool "Restrict mprotect()"
78099 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
78100 + help
78101 + Enabling this option will prevent programs from
78102 + - changing the executable status of memory pages that were
78103 + not originally created as executable,
78104 + - making read-only executable pages writable again,
78105 + - creating executable pages from anonymous memory,
78106 + - making read-only-after-relocations (RELRO) data pages writable again.
78107 +
78108 + You should say Y here to complete the protection provided by
78109 + the enforcement of non-executable pages.
78110 +
78111 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78112 + this feature on a per file basis.
78113 +
78114 +config PAX_MPROTECT_COMPAT
78115 + bool "Use legacy/compat protection demoting (read help)"
78116 + depends on PAX_MPROTECT
78117 + default n
78118 + help
78119 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
78120 + by sending the proper error code to the application. For some broken
78121 + userland, this can cause problems with Python or other applications. The
78122 + current implementation however allows for applications like clamav to
78123 + detect if JIT compilation/execution is allowed and to fall back gracefully
78124 + to an interpreter-based mode if it does not. While we encourage everyone
78125 + to use the current implementation as-is and push upstream to fix broken
78126 + userland (note that the RWX logging option can assist with this), in some
78127 + environments this may not be possible. Having to disable MPROTECT
78128 + completely on certain binaries reduces the security benefit of PaX,
78129 + so this option is provided for those environments to revert to the old
78130 + behavior.
78131 +
78132 +config PAX_ELFRELOCS
78133 + bool "Allow ELF text relocations (read help)"
78134 + depends on PAX_MPROTECT
78135 + default n
78136 + help
78137 + Non-executable pages and mprotect() restrictions are effective
78138 + in preventing the introduction of new executable code into an
78139 + attacked task's address space. There remain only two venues
78140 + for this kind of attack: if the attacker can execute already
78141 + existing code in the attacked task then he can either have it
78142 + create and mmap() a file containing his code or have it mmap()
78143 + an already existing ELF library that does not have position
78144 + independent code in it and use mprotect() on it to make it
78145 + writable and copy his code there. While protecting against
78146 + the former approach is beyond PaX, the latter can be prevented
78147 + by having only PIC ELF libraries on one's system (which do not
78148 + need to relocate their code). If you are sure this is your case,
78149 + as is the case with all modern Linux distributions, then leave
78150 + this option disabled. You should say 'n' here.
78151 +
78152 +config PAX_ETEXECRELOCS
78153 + bool "Allow ELF ET_EXEC text relocations"
78154 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
78155 + select PAX_ELFRELOCS
78156 + default y
78157 + help
78158 + On some architectures there are incorrectly created applications
78159 + that require text relocations and would not work without enabling
78160 + this option. If you are an alpha, ia64 or parisc user, you should
78161 + enable this option and disable it once you have made sure that
78162 + none of your applications need it.
78163 +
78164 +config PAX_EMUPLT
78165 + bool "Automatically emulate ELF PLT"
78166 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
78167 + default y
78168 + help
78169 + Enabling this option will have the kernel automatically detect
78170 + and emulate the Procedure Linkage Table entries in ELF files.
78171 + On some architectures such entries are in writable memory, and
78172 + become non-executable leading to task termination. Therefore
78173 + it is mandatory that you enable this option on alpha, parisc,
78174 + sparc and sparc64, otherwise your system would not even boot.
78175 +
78176 + NOTE: this feature *does* open up a loophole in the protection
78177 + provided by the non-executable pages, therefore the proper
78178 + solution is to modify the toolchain to produce a PLT that does
78179 + not need to be writable.
78180 +
78181 +config PAX_DLRESOLVE
78182 + bool 'Emulate old glibc resolver stub'
78183 + depends on PAX_EMUPLT && SPARC
78184 + default n
78185 + help
78186 + This option is needed if userland has an old glibc (before 2.4)
78187 + that puts a 'save' instruction into the runtime generated resolver
78188 + stub that needs special emulation.
78189 +
78190 +config PAX_KERNEXEC
78191 + bool "Enforce non-executable kernel pages"
78192 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
78193 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
78194 + select PAX_KERNEXEC_PLUGIN if X86_64
78195 + help
78196 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
78197 + that is, enabling this option will make it harder to inject
78198 + and execute 'foreign' code in kernel memory itself.
78199 +
78200 + Note that on x86_64 kernels there is a known regression when
78201 + this feature and KVM/VMX are both enabled in the host kernel.
78202 +
78203 +choice
78204 + prompt "Return Address Instrumentation Method"
78205 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
78206 + depends on PAX_KERNEXEC_PLUGIN
78207 + help
78208 + Select the method used to instrument function pointer dereferences.
78209 + Note that binary modules cannot be instrumented by this approach.
78210 +
78211 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
78212 + bool "bts"
78213 + help
78214 + This method is compatible with binary only modules but has
78215 + a higher runtime overhead.
78216 +
78217 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
78218 + bool "or"
78219 + depends on !PARAVIRT
78220 + help
78221 + This method is incompatible with binary only modules but has
78222 + a lower runtime overhead.
78223 +endchoice
78224 +
78225 +config PAX_KERNEXEC_PLUGIN_METHOD
78226 + string
78227 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
78228 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
78229 + default ""
78230 +
78231 +config PAX_KERNEXEC_MODULE_TEXT
78232 + int "Minimum amount of memory reserved for module code"
78233 + default "4"
78234 + depends on PAX_KERNEXEC && X86_32 && MODULES
78235 + help
78236 + Due to implementation details the kernel must reserve a fixed
78237 + amount of memory for module code at compile time that cannot be
78238 + changed at runtime. Here you can specify the minimum amount
78239 + in MB that will be reserved. Due to the same implementation
78240 + details this size will always be rounded up to the next 2/4 MB
78241 + boundary (depends on PAE) so the actually available memory for
78242 + module code will usually be more than this minimum.
78243 +
78244 + The default 4 MB should be enough for most users but if you have
78245 + an excessive number of modules (e.g., most distribution configs
78246 + compile many drivers as modules) or use huge modules such as
78247 + nvidia's kernel driver, you will need to adjust this amount.
78248 + A good rule of thumb is to look at your currently loaded kernel
78249 + modules and add up their sizes.
78250 +
78251 +endmenu
78252 +
78253 +menu "Address Space Layout Randomization"
78254 + depends on PAX
78255 +
78256 +config PAX_ASLR
78257 + bool "Address Space Layout Randomization"
78258 + help
78259 + Many if not most exploit techniques rely on the knowledge of
78260 + certain addresses in the attacked program. The following options
78261 + will allow the kernel to apply a certain amount of randomization
78262 + to specific parts of the program thereby forcing an attacker to
78263 + guess them in most cases. Any failed guess will most likely crash
78264 + the attacked program which allows the kernel to detect such attempts
78265 + and react on them. PaX itself provides no reaction mechanisms,
78266 + instead it is strongly encouraged that you make use of Nergal's
78267 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
78268 + (http://www.grsecurity.net/) built-in crash detection features or
78269 + develop one yourself.
78270 +
78271 + By saying Y here you can choose to randomize the following areas:
78272 + - top of the task's kernel stack
78273 + - top of the task's userland stack
78274 + - base address for mmap() requests that do not specify one
78275 + (this includes all libraries)
78276 + - base address of the main executable
78277 +
78278 + It is strongly recommended to say Y here as address space layout
78279 + randomization has negligible impact on performance yet it provides
78280 + a very effective protection.
78281 +
78282 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78283 + this feature on a per file basis.
78284 +
78285 +config PAX_RANDKSTACK
78286 + bool "Randomize kernel stack base"
78287 + depends on X86_TSC && X86
78288 + help
78289 + By saying Y here the kernel will randomize every task's kernel
78290 + stack on every system call. This will not only force an attacker
78291 + to guess it but also prevent him from making use of possible
78292 + leaked information about it.
78293 +
78294 + Since the kernel stack is a rather scarce resource, randomization
78295 + may cause unexpected stack overflows, therefore you should very
78296 + carefully test your system. Note that once enabled in the kernel
78297 + configuration, this feature cannot be disabled on a per file basis.
78298 +
78299 +config PAX_RANDUSTACK
78300 + bool "Randomize user stack base"
78301 + depends on PAX_ASLR
78302 + help
78303 + By saying Y here the kernel will randomize every task's userland
78304 + stack. The randomization is done in two steps where the second
78305 + one may apply a big amount of shift to the top of the stack and
78306 + cause problems for programs that want to use lots of memory (more
78307 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
78308 + For this reason the second step can be controlled by 'chpax' or
78309 + 'paxctl' on a per file basis.
78310 +
78311 +config PAX_RANDMMAP
78312 + bool "Randomize mmap() base"
78313 + depends on PAX_ASLR
78314 + help
78315 + By saying Y here the kernel will use a randomized base address for
78316 + mmap() requests that do not specify one themselves. As a result
78317 + all dynamically loaded libraries will appear at random addresses
78318 + and therefore be harder to exploit by a technique where an attacker
78319 + attempts to execute library code for his purposes (e.g. spawn a
78320 + shell from an exploited program that is running at an elevated
78321 + privilege level).
78322 +
78323 + Furthermore, if a program is relinked as a dynamic ELF file, its
78324 + base address will be randomized as well, completing the full
78325 + randomization of the address space layout. Attacking such programs
78326 + becomes a guess game. You can find an example of doing this at
78327 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
78328 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
78329 +
78330 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
78331 + feature on a per file basis.
78332 +
78333 +endmenu
78334 +
78335 +menu "Miscellaneous hardening features"
78336 +
78337 +config PAX_MEMORY_SANITIZE
78338 + bool "Sanitize all freed memory"
78339 + depends on !HIBERNATION
78340 + help
78341 + By saying Y here the kernel will erase memory pages as soon as they
78342 + are freed. This in turn reduces the lifetime of data stored in the
78343 + pages, making it less likely that sensitive information such as
78344 + passwords, cryptographic secrets, etc stay in memory for too long.
78345 +
78346 + This is especially useful for programs whose runtime is short, long
78347 + lived processes and the kernel itself benefit from this as long as
78348 + they operate on whole memory pages and ensure timely freeing of pages
78349 + that may hold sensitive information.
78350 +
78351 + The tradeoff is performance impact, on a single CPU system kernel
78352 + compilation sees a 3% slowdown, other systems and workloads may vary
78353 + and you are advised to test this feature on your expected workload
78354 + before deploying it.
78355 +
78356 + Note that this feature does not protect data stored in live pages,
78357 + e.g., process memory swapped to disk may stay there for a long time.
78358 +
78359 +config PAX_MEMORY_STACKLEAK
78360 + bool "Sanitize kernel stack"
78361 + depends on X86
78362 + help
78363 + By saying Y here the kernel will erase the kernel stack before it
78364 + returns from a system call. This in turn reduces the information
78365 + that a kernel stack leak bug can reveal.
78366 +
78367 + Note that such a bug can still leak information that was put on
78368 + the stack by the current system call (the one eventually triggering
78369 + the bug) but traces of earlier system calls on the kernel stack
78370 + cannot leak anymore.
78371 +
78372 + The tradeoff is performance impact: on a single CPU system kernel
78373 + compilation sees a 1% slowdown, other systems and workloads may vary
78374 + and you are advised to test this feature on your expected workload
78375 + before deploying it.
78376 +
78377 + Note: full support for this feature requires gcc with plugin support
78378 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
78379 + versions means that functions with large enough stack frames may
78380 + leave uninitialized memory behind that may be exposed to a later
78381 + syscall leaking the stack.
78382 +
78383 +config PAX_MEMORY_UDEREF
78384 + bool "Prevent invalid userland pointer dereference"
78385 + depends on X86 && !UML_X86 && !XEN
78386 + select PAX_PER_CPU_PGD if X86_64
78387 + help
78388 + By saying Y here the kernel will be prevented from dereferencing
78389 + userland pointers in contexts where the kernel expects only kernel
78390 + pointers. This is both a useful runtime debugging feature and a
78391 + security measure that prevents exploiting a class of kernel bugs.
78392 +
78393 + The tradeoff is that some virtualization solutions may experience
78394 + a huge slowdown and therefore you should not enable this feature
78395 + for kernels meant to run in such environments. Whether a given VM
78396 + solution is affected or not is best determined by simply trying it
78397 + out, the performance impact will be obvious right on boot as this
78398 + mechanism engages from very early on. A good rule of thumb is that
78399 + VMs running on CPUs without hardware virtualization support (i.e.,
78400 + the majority of IA-32 CPUs) will likely experience the slowdown.
78401 +
78402 +config PAX_REFCOUNT
78403 + bool "Prevent various kernel object reference counter overflows"
78404 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
78405 + help
78406 + By saying Y here the kernel will detect and prevent overflowing
78407 + various (but not all) kinds of object reference counters. Such
78408 + overflows can normally occur due to bugs only and are often, if
78409 + not always, exploitable.
78410 +
78411 + The tradeoff is that data structures protected by an overflowed
78412 + refcount will never be freed and therefore will leak memory. Note
78413 + that this leak also happens even without this protection but in
78414 + that case the overflow can eventually trigger the freeing of the
78415 + data structure while it is still being used elsewhere, resulting
78416 + in the exploitable situation that this feature prevents.
78417 +
78418 + Since this has a negligible performance impact, you should enable
78419 + this feature.
78420 +
78421 +config PAX_USERCOPY
78422 + bool "Harden heap object copies between kernel and userland"
78423 + depends on X86 || PPC || SPARC || ARM
78424 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
78425 + help
78426 + By saying Y here the kernel will enforce the size of heap objects
78427 + when they are copied in either direction between the kernel and
78428 + userland, even if only a part of the heap object is copied.
78429 +
78430 + Specifically, this checking prevents information leaking from the
78431 + kernel heap during kernel to userland copies (if the kernel heap
78432 + object is otherwise fully initialized) and prevents kernel heap
78433 + overflows during userland to kernel copies.
78434 +
78435 + Note that the current implementation provides the strictest bounds
78436 + checks for the SLUB allocator.
78437 +
78438 + Enabling this option also enables per-slab cache protection against
78439 + data in a given cache being copied into/out of via userland
78440 + accessors. Though the whitelist of regions will be reduced over
78441 + time, it notably protects important data structures like task structs.
78442 +
78443 + If frame pointers are enabled on x86, this option will also restrict
78444 + copies into and out of the kernel stack to local variables within a
78445 + single frame.
78446 +
78447 + Since this has a negligible performance impact, you should enable
78448 + this feature.
78449 +
78450 +endmenu
78451 +
78452 +endmenu
78453 +
78454 config KEYS
78455 bool "Enable access key retention support"
78456 help
78457 @@ -169,7 +790,7 @@ config INTEL_TXT
78458 config LSM_MMAP_MIN_ADDR
78459 int "Low address space for LSM to protect from user allocation"
78460 depends on SECURITY && SECURITY_SELINUX
78461 - default 32768 if ARM
78462 + default 32768 if ALPHA || ARM || PARISC || SPARC32
78463 default 65536
78464 help
78465 This is the portion of low virtual memory which should be protected
78466 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
78467 index 3783202..1852837 100644
78468 --- a/security/apparmor/lsm.c
78469 +++ b/security/apparmor/lsm.c
78470 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
78471 return error;
78472 }
78473
78474 -static struct security_operations apparmor_ops = {
78475 +static struct security_operations apparmor_ops __read_only = {
78476 .name = "apparmor",
78477
78478 .ptrace_access_check = apparmor_ptrace_access_check,
78479 diff --git a/security/commoncap.c b/security/commoncap.c
78480 index ee4f848..a320c64 100644
78481 --- a/security/commoncap.c
78482 +++ b/security/commoncap.c
78483 @@ -28,6 +28,7 @@
78484 #include <linux/prctl.h>
78485 #include <linux/securebits.h>
78486 #include <linux/user_namespace.h>
78487 +#include <net/sock.h>
78488
78489 /*
78490 * If a non-root user executes a setuid-root binary in
78491 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
78492
78493 int cap_netlink_recv(struct sk_buff *skb, int cap)
78494 {
78495 - if (!cap_raised(current_cap(), cap))
78496 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
78497 return -EPERM;
78498 return 0;
78499 }
78500 @@ -579,6 +580,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
78501 {
78502 const struct cred *cred = current_cred();
78503
78504 + if (gr_acl_enable_at_secure())
78505 + return 1;
78506 +
78507 if (cred->uid != 0) {
78508 if (bprm->cap_effective)
78509 return 1;
78510 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
78511 index 3ccf7ac..d73ad64 100644
78512 --- a/security/integrity/ima/ima.h
78513 +++ b/security/integrity/ima/ima.h
78514 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78515 extern spinlock_t ima_queue_lock;
78516
78517 struct ima_h_table {
78518 - atomic_long_t len; /* number of stored measurements in the list */
78519 - atomic_long_t violations;
78520 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
78521 + atomic_long_unchecked_t violations;
78522 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
78523 };
78524 extern struct ima_h_table ima_htable;
78525 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
78526 index 88a2788..581ab92 100644
78527 --- a/security/integrity/ima/ima_api.c
78528 +++ b/security/integrity/ima/ima_api.c
78529 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
78530 int result;
78531
78532 /* can overflow, only indicator */
78533 - atomic_long_inc(&ima_htable.violations);
78534 + atomic_long_inc_unchecked(&ima_htable.violations);
78535
78536 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
78537 if (!entry) {
78538 diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
78539 index c5c5a72..2ad942f 100644
78540 --- a/security/integrity/ima/ima_audit.c
78541 +++ b/security/integrity/ima/ima_audit.c
78542 @@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
78543 audit_log_format(ab, " name=");
78544 audit_log_untrustedstring(ab, fname);
78545 }
78546 - if (inode)
78547 - audit_log_format(ab, " dev=%s ino=%lu",
78548 - inode->i_sb->s_id, inode->i_ino);
78549 + if (inode) {
78550 + audit_log_format(ab, " dev=");
78551 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
78552 + audit_log_format(ab, " ino=%lu", inode->i_ino);
78553 + }
78554 audit_log_format(ab, " res=%d", !result ? 0 : 1);
78555 audit_log_end(ab);
78556 }
78557 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
78558 index e1aa2b4..52027bf 100644
78559 --- a/security/integrity/ima/ima_fs.c
78560 +++ b/security/integrity/ima/ima_fs.c
78561 @@ -28,12 +28,12 @@
78562 static int valid_policy = 1;
78563 #define TMPBUFLEN 12
78564 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
78565 - loff_t *ppos, atomic_long_t *val)
78566 + loff_t *ppos, atomic_long_unchecked_t *val)
78567 {
78568 char tmpbuf[TMPBUFLEN];
78569 ssize_t len;
78570
78571 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
78572 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
78573 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
78574 }
78575
78576 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
78577 index 55a6271..ad829c3 100644
78578 --- a/security/integrity/ima/ima_queue.c
78579 +++ b/security/integrity/ima/ima_queue.c
78580 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
78581 INIT_LIST_HEAD(&qe->later);
78582 list_add_tail_rcu(&qe->later, &ima_measurements);
78583
78584 - atomic_long_inc(&ima_htable.len);
78585 + atomic_long_inc_unchecked(&ima_htable.len);
78586 key = ima_hash_key(entry->digest);
78587 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
78588 return 0;
78589 diff --git a/security/keys/compat.c b/security/keys/compat.c
78590 index 4c48e13..7abdac9 100644
78591 --- a/security/keys/compat.c
78592 +++ b/security/keys/compat.c
78593 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
78594 if (ret == 0)
78595 goto no_payload_free;
78596
78597 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78598 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78599
78600 if (iov != iovstack)
78601 kfree(iov);
78602 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
78603 index 0b3f5d7..892c8a6 100644
78604 --- a/security/keys/keyctl.c
78605 +++ b/security/keys/keyctl.c
78606 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
78607 /*
78608 * Copy the iovec data from userspace
78609 */
78610 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78611 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
78612 unsigned ioc)
78613 {
78614 for (; ioc > 0; ioc--) {
78615 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
78616 * If successful, 0 will be returned.
78617 */
78618 long keyctl_instantiate_key_common(key_serial_t id,
78619 - const struct iovec *payload_iov,
78620 + const struct iovec __user *payload_iov,
78621 unsigned ioc,
78622 size_t plen,
78623 key_serial_t ringid)
78624 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
78625 [0].iov_len = plen
78626 };
78627
78628 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
78629 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
78630 }
78631
78632 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
78633 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
78634 if (ret == 0)
78635 goto no_payload_free;
78636
78637 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
78638 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
78639
78640 if (iov != iovstack)
78641 kfree(iov);
78642 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
78643 index 37a7f3b..86dc19f 100644
78644 --- a/security/keys/keyring.c
78645 +++ b/security/keys/keyring.c
78646 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
78647 ret = -EFAULT;
78648
78649 for (loop = 0; loop < klist->nkeys; loop++) {
78650 + key_serial_t serial;
78651 key = klist->keys[loop];
78652 + serial = key->serial;
78653
78654 tmp = sizeof(key_serial_t);
78655 if (tmp > buflen)
78656 tmp = buflen;
78657
78658 - if (copy_to_user(buffer,
78659 - &key->serial,
78660 - tmp) != 0)
78661 + if (copy_to_user(buffer, &serial, tmp))
78662 goto error;
78663
78664 buflen -= tmp;
78665 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
78666 index 893af8a..ba9237c 100644
78667 --- a/security/lsm_audit.c
78668 +++ b/security/lsm_audit.c
78669 @@ -234,10 +234,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
78670 audit_log_d_path(ab, "path=", &a->u.path);
78671
78672 inode = a->u.path.dentry->d_inode;
78673 - if (inode)
78674 - audit_log_format(ab, " dev=%s ino=%lu",
78675 - inode->i_sb->s_id,
78676 - inode->i_ino);
78677 + if (inode) {
78678 + audit_log_format(ab, " dev=");
78679 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
78680 + audit_log_format(ab, " ino=%lu", inode->i_ino);
78681 + }
78682 break;
78683 }
78684 case LSM_AUDIT_DATA_DENTRY: {
78685 @@ -247,10 +248,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
78686 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
78687
78688 inode = a->u.dentry->d_inode;
78689 - if (inode)
78690 - audit_log_format(ab, " dev=%s ino=%lu",
78691 - inode->i_sb->s_id,
78692 - inode->i_ino);
78693 + if (inode) {
78694 + audit_log_format(ab, " dev=");
78695 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
78696 + audit_log_format(ab, " ino=%lu", inode->i_ino);
78697 + }
78698 break;
78699 }
78700 case LSM_AUDIT_DATA_INODE: {
78701 @@ -265,8 +267,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
78702 dentry->d_name.name);
78703 dput(dentry);
78704 }
78705 - audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
78706 - inode->i_ino);
78707 + audit_log_format(ab, " dev=");
78708 + audit_log_untrustedstring(ab, inode->i_sb->s_id);
78709 + audit_log_format(ab, " ino=%lu", inode->i_ino);
78710 break;
78711 }
78712 case LSM_AUDIT_DATA_TASK:
78713 diff --git a/security/min_addr.c b/security/min_addr.c
78714 index f728728..6457a0c 100644
78715 --- a/security/min_addr.c
78716 +++ b/security/min_addr.c
78717 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
78718 */
78719 static void update_mmap_min_addr(void)
78720 {
78721 +#ifndef SPARC
78722 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
78723 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
78724 mmap_min_addr = dac_mmap_min_addr;
78725 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
78726 #else
78727 mmap_min_addr = dac_mmap_min_addr;
78728 #endif
78729 +#endif
78730 }
78731
78732 /*
78733 diff --git a/security/security.c b/security/security.c
78734 index e2f684a..8d62ef5 100644
78735 --- a/security/security.c
78736 +++ b/security/security.c
78737 @@ -26,8 +26,8 @@
78738 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
78739 CONFIG_DEFAULT_SECURITY;
78740
78741 -static struct security_operations *security_ops;
78742 -static struct security_operations default_security_ops = {
78743 +static struct security_operations *security_ops __read_only;
78744 +static struct security_operations default_security_ops __read_only = {
78745 .name = "default",
78746 };
78747
78748 @@ -68,7 +68,9 @@ int __init security_init(void)
78749
78750 void reset_security_ops(void)
78751 {
78752 + pax_open_kernel();
78753 security_ops = &default_security_ops;
78754 + pax_close_kernel();
78755 }
78756
78757 /* Save user chosen LSM */
78758 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
78759 index 1126c10..effb32b 100644
78760 --- a/security/selinux/hooks.c
78761 +++ b/security/selinux/hooks.c
78762 @@ -94,8 +94,6 @@
78763
78764 #define NUM_SEL_MNT_OPTS 5
78765
78766 -extern struct security_operations *security_ops;
78767 -
78768 /* SECMARK reference count */
78769 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
78770
78771 @@ -5449,7 +5447,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
78772
78773 #endif
78774
78775 -static struct security_operations selinux_ops = {
78776 +static struct security_operations selinux_ops __read_only = {
78777 .name = "selinux",
78778
78779 .ptrace_access_check = selinux_ptrace_access_check,
78780 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
78781 index b43813c..74be837 100644
78782 --- a/security/selinux/include/xfrm.h
78783 +++ b/security/selinux/include/xfrm.h
78784 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
78785
78786 static inline void selinux_xfrm_notify_policyload(void)
78787 {
78788 - atomic_inc(&flow_cache_genid);
78789 + atomic_inc_unchecked(&flow_cache_genid);
78790 }
78791 #else
78792 static inline int selinux_xfrm_enabled(void)
78793 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
78794 index 7db62b4..ee4d949 100644
78795 --- a/security/smack/smack_lsm.c
78796 +++ b/security/smack/smack_lsm.c
78797 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
78798 return 0;
78799 }
78800
78801 -struct security_operations smack_ops = {
78802 +struct security_operations smack_ops __read_only = {
78803 .name = "smack",
78804
78805 .ptrace_access_check = smack_ptrace_access_check,
78806 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
78807 index 4b327b6..646c57a 100644
78808 --- a/security/tomoyo/tomoyo.c
78809 +++ b/security/tomoyo/tomoyo.c
78810 @@ -504,7 +504,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
78811 * tomoyo_security_ops is a "struct security_operations" which is used for
78812 * registering TOMOYO.
78813 */
78814 -static struct security_operations tomoyo_security_ops = {
78815 +static struct security_operations tomoyo_security_ops __read_only = {
78816 .name = "tomoyo",
78817 .cred_alloc_blank = tomoyo_cred_alloc_blank,
78818 .cred_prepare = tomoyo_cred_prepare,
78819 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
78820 index 762af68..7103453 100644
78821 --- a/sound/aoa/codecs/onyx.c
78822 +++ b/sound/aoa/codecs/onyx.c
78823 @@ -54,7 +54,7 @@ struct onyx {
78824 spdif_locked:1,
78825 analog_locked:1,
78826 original_mute:2;
78827 - int open_count;
78828 + local_t open_count;
78829 struct codec_info *codec_info;
78830
78831 /* mutex serializes concurrent access to the device
78832 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
78833 struct onyx *onyx = cii->codec_data;
78834
78835 mutex_lock(&onyx->mutex);
78836 - onyx->open_count++;
78837 + local_inc(&onyx->open_count);
78838 mutex_unlock(&onyx->mutex);
78839
78840 return 0;
78841 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
78842 struct onyx *onyx = cii->codec_data;
78843
78844 mutex_lock(&onyx->mutex);
78845 - onyx->open_count--;
78846 - if (!onyx->open_count)
78847 + if (local_dec_and_test(&onyx->open_count))
78848 onyx->spdif_locked = onyx->analog_locked = 0;
78849 mutex_unlock(&onyx->mutex);
78850
78851 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
78852 index ffd2025..df062c9 100644
78853 --- a/sound/aoa/codecs/onyx.h
78854 +++ b/sound/aoa/codecs/onyx.h
78855 @@ -11,6 +11,7 @@
78856 #include <linux/i2c.h>
78857 #include <asm/pmac_low_i2c.h>
78858 #include <asm/prom.h>
78859 +#include <asm/local.h>
78860
78861 /* PCM3052 register definitions */
78862
78863 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
78864 index 3cc4b86..af0a951 100644
78865 --- a/sound/core/oss/pcm_oss.c
78866 +++ b/sound/core/oss/pcm_oss.c
78867 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
78868 if (in_kernel) {
78869 mm_segment_t fs;
78870 fs = snd_enter_user();
78871 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78872 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78873 snd_leave_user(fs);
78874 } else {
78875 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
78876 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
78877 }
78878 if (ret != -EPIPE && ret != -ESTRPIPE)
78879 break;
78880 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
78881 if (in_kernel) {
78882 mm_segment_t fs;
78883 fs = snd_enter_user();
78884 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78885 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78886 snd_leave_user(fs);
78887 } else {
78888 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
78889 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
78890 }
78891 if (ret == -EPIPE) {
78892 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
78893 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
78894 struct snd_pcm_plugin_channel *channels;
78895 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
78896 if (!in_kernel) {
78897 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
78898 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
78899 return -EFAULT;
78900 buf = runtime->oss.buffer;
78901 }
78902 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
78903 }
78904 } else {
78905 tmp = snd_pcm_oss_write2(substream,
78906 - (const char __force *)buf,
78907 + (const char __force_kernel *)buf,
78908 runtime->oss.period_bytes, 0);
78909 if (tmp <= 0)
78910 goto err;
78911 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
78912 struct snd_pcm_runtime *runtime = substream->runtime;
78913 snd_pcm_sframes_t frames, frames1;
78914 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
78915 - char __user *final_dst = (char __force __user *)buf;
78916 + char __user *final_dst = (char __force_user *)buf;
78917 if (runtime->oss.plugin_first) {
78918 struct snd_pcm_plugin_channel *channels;
78919 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
78920 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
78921 xfer += tmp;
78922 runtime->oss.buffer_used -= tmp;
78923 } else {
78924 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
78925 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
78926 runtime->oss.period_bytes, 0);
78927 if (tmp <= 0)
78928 goto err;
78929 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
78930 size1);
78931 size1 /= runtime->channels; /* frames */
78932 fs = snd_enter_user();
78933 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
78934 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
78935 snd_leave_user(fs);
78936 }
78937 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
78938 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
78939 index 91cdf94..4085161 100644
78940 --- a/sound/core/pcm_compat.c
78941 +++ b/sound/core/pcm_compat.c
78942 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
78943 int err;
78944
78945 fs = snd_enter_user();
78946 - err = snd_pcm_delay(substream, &delay);
78947 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
78948 snd_leave_user(fs);
78949 if (err < 0)
78950 return err;
78951 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
78952 index 25ed9fe..24c46e9 100644
78953 --- a/sound/core/pcm_native.c
78954 +++ b/sound/core/pcm_native.c
78955 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
78956 switch (substream->stream) {
78957 case SNDRV_PCM_STREAM_PLAYBACK:
78958 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
78959 - (void __user *)arg);
78960 + (void __force_user *)arg);
78961 break;
78962 case SNDRV_PCM_STREAM_CAPTURE:
78963 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
78964 - (void __user *)arg);
78965 + (void __force_user *)arg);
78966 break;
78967 default:
78968 result = -EINVAL;
78969 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
78970 index 5cf8d65..912a79c 100644
78971 --- a/sound/core/seq/seq_device.c
78972 +++ b/sound/core/seq/seq_device.c
78973 @@ -64,7 +64,7 @@ struct ops_list {
78974 int argsize; /* argument size */
78975
78976 /* operators */
78977 - struct snd_seq_dev_ops ops;
78978 + struct snd_seq_dev_ops *ops;
78979
78980 /* registred devices */
78981 struct list_head dev_list; /* list of devices */
78982 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
78983
78984 mutex_lock(&ops->reg_mutex);
78985 /* copy driver operators */
78986 - ops->ops = *entry;
78987 + ops->ops = entry;
78988 ops->driver |= DRIVER_LOADED;
78989 ops->argsize = argsize;
78990
78991 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
78992 dev->name, ops->id, ops->argsize, dev->argsize);
78993 return -EINVAL;
78994 }
78995 - if (ops->ops.init_device(dev) >= 0) {
78996 + if (ops->ops->init_device(dev) >= 0) {
78997 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
78998 ops->num_init_devices++;
78999 } else {
79000 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
79001 dev->name, ops->id, ops->argsize, dev->argsize);
79002 return -EINVAL;
79003 }
79004 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
79005 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
79006 dev->status = SNDRV_SEQ_DEVICE_FREE;
79007 dev->driver_data = NULL;
79008 ops->num_init_devices--;
79009 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
79010 index f24bf9a..1f7b67c 100644
79011 --- a/sound/drivers/mts64.c
79012 +++ b/sound/drivers/mts64.c
79013 @@ -29,6 +29,7 @@
79014 #include <sound/initval.h>
79015 #include <sound/rawmidi.h>
79016 #include <sound/control.h>
79017 +#include <asm/local.h>
79018
79019 #define CARD_NAME "Miditerminal 4140"
79020 #define DRIVER_NAME "MTS64"
79021 @@ -67,7 +68,7 @@ struct mts64 {
79022 struct pardevice *pardev;
79023 int pardev_claimed;
79024
79025 - int open_count;
79026 + local_t open_count;
79027 int current_midi_output_port;
79028 int current_midi_input_port;
79029 u8 mode[MTS64_NUM_INPUT_PORTS];
79030 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79031 {
79032 struct mts64 *mts = substream->rmidi->private_data;
79033
79034 - if (mts->open_count == 0) {
79035 + if (local_read(&mts->open_count) == 0) {
79036 /* We don't need a spinlock here, because this is just called
79037 if the device has not been opened before.
79038 So there aren't any IRQs from the device */
79039 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79040
79041 msleep(50);
79042 }
79043 - ++(mts->open_count);
79044 + local_inc(&mts->open_count);
79045
79046 return 0;
79047 }
79048 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79049 struct mts64 *mts = substream->rmidi->private_data;
79050 unsigned long flags;
79051
79052 - --(mts->open_count);
79053 - if (mts->open_count == 0) {
79054 + if (local_dec_return(&mts->open_count) == 0) {
79055 /* We need the spinlock_irqsave here because we can still
79056 have IRQs at this point */
79057 spin_lock_irqsave(&mts->lock, flags);
79058 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79059
79060 msleep(500);
79061
79062 - } else if (mts->open_count < 0)
79063 - mts->open_count = 0;
79064 + } else if (local_read(&mts->open_count) < 0)
79065 + local_set(&mts->open_count, 0);
79066
79067 return 0;
79068 }
79069 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
79070 index b953fb4..1999c01 100644
79071 --- a/sound/drivers/opl4/opl4_lib.c
79072 +++ b/sound/drivers/opl4/opl4_lib.c
79073 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
79074 MODULE_DESCRIPTION("OPL4 driver");
79075 MODULE_LICENSE("GPL");
79076
79077 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
79078 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
79079 {
79080 int timeout = 10;
79081 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
79082 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
79083 index f664823..590c745 100644
79084 --- a/sound/drivers/portman2x4.c
79085 +++ b/sound/drivers/portman2x4.c
79086 @@ -48,6 +48,7 @@
79087 #include <sound/initval.h>
79088 #include <sound/rawmidi.h>
79089 #include <sound/control.h>
79090 +#include <asm/local.h>
79091
79092 #define CARD_NAME "Portman 2x4"
79093 #define DRIVER_NAME "portman"
79094 @@ -85,7 +86,7 @@ struct portman {
79095 struct pardevice *pardev;
79096 int pardev_claimed;
79097
79098 - int open_count;
79099 + local_t open_count;
79100 int mode[PORTMAN_NUM_INPUT_PORTS];
79101 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
79102 };
79103 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
79104 index 87657dd..a8268d4 100644
79105 --- a/sound/firewire/amdtp.c
79106 +++ b/sound/firewire/amdtp.c
79107 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
79108 ptr = s->pcm_buffer_pointer + data_blocks;
79109 if (ptr >= pcm->runtime->buffer_size)
79110 ptr -= pcm->runtime->buffer_size;
79111 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
79112 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
79113
79114 s->pcm_period_pointer += data_blocks;
79115 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
79116 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
79117 */
79118 void amdtp_out_stream_update(struct amdtp_out_stream *s)
79119 {
79120 - ACCESS_ONCE(s->source_node_id_field) =
79121 + ACCESS_ONCE_RW(s->source_node_id_field) =
79122 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
79123 }
79124 EXPORT_SYMBOL(amdtp_out_stream_update);
79125 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
79126 index 537a9cb..8e8c8e9 100644
79127 --- a/sound/firewire/amdtp.h
79128 +++ b/sound/firewire/amdtp.h
79129 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
79130 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
79131 struct snd_pcm_substream *pcm)
79132 {
79133 - ACCESS_ONCE(s->pcm) = pcm;
79134 + ACCESS_ONCE_RW(s->pcm) = pcm;
79135 }
79136
79137 /**
79138 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
79139 index cd094ec..eca1277 100644
79140 --- a/sound/firewire/isight.c
79141 +++ b/sound/firewire/isight.c
79142 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
79143 ptr += count;
79144 if (ptr >= runtime->buffer_size)
79145 ptr -= runtime->buffer_size;
79146 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
79147 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
79148
79149 isight->period_counter += count;
79150 if (isight->period_counter >= runtime->period_size) {
79151 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
79152 if (err < 0)
79153 return err;
79154
79155 - ACCESS_ONCE(isight->pcm_active) = true;
79156 + ACCESS_ONCE_RW(isight->pcm_active) = true;
79157
79158 return 0;
79159 }
79160 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
79161 {
79162 struct isight *isight = substream->private_data;
79163
79164 - ACCESS_ONCE(isight->pcm_active) = false;
79165 + ACCESS_ONCE_RW(isight->pcm_active) = false;
79166
79167 mutex_lock(&isight->mutex);
79168 isight_stop_streaming(isight);
79169 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
79170
79171 switch (cmd) {
79172 case SNDRV_PCM_TRIGGER_START:
79173 - ACCESS_ONCE(isight->pcm_running) = true;
79174 + ACCESS_ONCE_RW(isight->pcm_running) = true;
79175 break;
79176 case SNDRV_PCM_TRIGGER_STOP:
79177 - ACCESS_ONCE(isight->pcm_running) = false;
79178 + ACCESS_ONCE_RW(isight->pcm_running) = false;
79179 break;
79180 default:
79181 return -EINVAL;
79182 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
79183 index c94578d..0794ac1 100644
79184 --- a/sound/isa/cmi8330.c
79185 +++ b/sound/isa/cmi8330.c
79186 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
79187
79188 struct snd_pcm *pcm;
79189 struct snd_cmi8330_stream {
79190 - struct snd_pcm_ops ops;
79191 + snd_pcm_ops_no_const ops;
79192 snd_pcm_open_callback_t open;
79193 void *private_data; /* sb or wss */
79194 } streams[2];
79195 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
79196 index 733b014..56ce96f 100644
79197 --- a/sound/oss/sb_audio.c
79198 +++ b/sound/oss/sb_audio.c
79199 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
79200 buf16 = (signed short *)(localbuf + localoffs);
79201 while (c)
79202 {
79203 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79204 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79205 if (copy_from_user(lbuf8,
79206 userbuf+useroffs + p,
79207 locallen))
79208 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
79209 index 09d4648..cf234c7 100644
79210 --- a/sound/oss/swarm_cs4297a.c
79211 +++ b/sound/oss/swarm_cs4297a.c
79212 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
79213 {
79214 struct cs4297a_state *s;
79215 u32 pwr, id;
79216 - mm_segment_t fs;
79217 int rval;
79218 #ifndef CONFIG_BCM_CS4297A_CSWARM
79219 u64 cfg;
79220 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
79221 if (!rval) {
79222 char *sb1250_duart_present;
79223
79224 +#if 0
79225 + mm_segment_t fs;
79226 fs = get_fs();
79227 set_fs(KERNEL_DS);
79228 -#if 0
79229 val = SOUND_MASK_LINE;
79230 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
79231 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
79232 val = initvol[i].vol;
79233 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
79234 }
79235 + set_fs(fs);
79236 // cs4297a_write_ac97(s, 0x18, 0x0808);
79237 #else
79238 // cs4297a_write_ac97(s, 0x5e, 0x180);
79239 cs4297a_write_ac97(s, 0x02, 0x0808);
79240 cs4297a_write_ac97(s, 0x18, 0x0808);
79241 #endif
79242 - set_fs(fs);
79243
79244 list_add(&s->list, &cs4297a_devs);
79245
79246 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
79247 index 71f6744..d8aeae7 100644
79248 --- a/sound/pci/hda/hda_codec.h
79249 +++ b/sound/pci/hda/hda_codec.h
79250 @@ -614,7 +614,7 @@ struct hda_bus_ops {
79251 /* notify power-up/down from codec to controller */
79252 void (*pm_notify)(struct hda_bus *bus);
79253 #endif
79254 -};
79255 +} __no_const;
79256
79257 /* template to pass to the bus constructor */
79258 struct hda_bus_template {
79259 @@ -716,6 +716,7 @@ struct hda_codec_ops {
79260 #endif
79261 void (*reboot_notify)(struct hda_codec *codec);
79262 };
79263 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
79264
79265 /* record for amp information cache */
79266 struct hda_cache_head {
79267 @@ -746,7 +747,7 @@ struct hda_pcm_ops {
79268 struct snd_pcm_substream *substream);
79269 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
79270 struct snd_pcm_substream *substream);
79271 -};
79272 +} __no_const;
79273
79274 /* PCM information for each substream */
79275 struct hda_pcm_stream {
79276 @@ -804,7 +805,7 @@ struct hda_codec {
79277 const char *modelname; /* model name for preset */
79278
79279 /* set by patch */
79280 - struct hda_codec_ops patch_ops;
79281 + hda_codec_ops_no_const patch_ops;
79282
79283 /* PCM to create, set by patch_ops.build_pcms callback */
79284 unsigned int num_pcms;
79285 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
79286 index 0da778a..bc38b84 100644
79287 --- a/sound/pci/ice1712/ice1712.h
79288 +++ b/sound/pci/ice1712/ice1712.h
79289 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
79290 unsigned int mask_flags; /* total mask bits */
79291 struct snd_akm4xxx_ops {
79292 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
79293 - } ops;
79294 + } __no_const ops;
79295 };
79296
79297 struct snd_ice1712_spdif {
79298 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
79299 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79300 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79301 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79302 - } ops;
79303 + } __no_const ops;
79304 };
79305
79306
79307 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
79308 index 03ee4e3..be86b46 100644
79309 --- a/sound/pci/ymfpci/ymfpci_main.c
79310 +++ b/sound/pci/ymfpci/ymfpci_main.c
79311 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
79312 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
79313 break;
79314 }
79315 - if (atomic_read(&chip->interrupt_sleep_count)) {
79316 - atomic_set(&chip->interrupt_sleep_count, 0);
79317 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79318 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79319 wake_up(&chip->interrupt_sleep);
79320 }
79321 __end:
79322 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
79323 continue;
79324 init_waitqueue_entry(&wait, current);
79325 add_wait_queue(&chip->interrupt_sleep, &wait);
79326 - atomic_inc(&chip->interrupt_sleep_count);
79327 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
79328 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
79329 remove_wait_queue(&chip->interrupt_sleep, &wait);
79330 }
79331 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
79332 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
79333 spin_unlock(&chip->reg_lock);
79334
79335 - if (atomic_read(&chip->interrupt_sleep_count)) {
79336 - atomic_set(&chip->interrupt_sleep_count, 0);
79337 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79338 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79339 wake_up(&chip->interrupt_sleep);
79340 }
79341 }
79342 @@ -2382,7 +2382,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
79343 spin_lock_init(&chip->reg_lock);
79344 spin_lock_init(&chip->voice_lock);
79345 init_waitqueue_head(&chip->interrupt_sleep);
79346 - atomic_set(&chip->interrupt_sleep_count, 0);
79347 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79348 chip->card = card;
79349 chip->pci = pci;
79350 chip->irq = -1;
79351 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
79352 index ee15337..e2187a6 100644
79353 --- a/sound/soc/soc-pcm.c
79354 +++ b/sound/soc/soc-pcm.c
79355 @@ -583,7 +583,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
79356 }
79357
79358 /* ASoC PCM operations */
79359 -static struct snd_pcm_ops soc_pcm_ops = {
79360 +static snd_pcm_ops_no_const soc_pcm_ops = {
79361 .open = soc_pcm_open,
79362 .close = soc_pcm_close,
79363 .hw_params = soc_pcm_hw_params,
79364 diff --git a/sound/usb/card.h b/sound/usb/card.h
79365 index a39edcc..1014050 100644
79366 --- a/sound/usb/card.h
79367 +++ b/sound/usb/card.h
79368 @@ -44,6 +44,7 @@ struct snd_urb_ops {
79369 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79370 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79371 };
79372 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
79373
79374 struct snd_usb_substream {
79375 struct snd_usb_stream *stream;
79376 @@ -93,7 +94,7 @@ struct snd_usb_substream {
79377 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
79378 spinlock_t lock;
79379
79380 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
79381 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
79382 int last_frame_number; /* stored frame number */
79383 int last_delay; /* stored delay */
79384 };
79385 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
79386 new file mode 100644
79387 index 0000000..894c8bf
79388 --- /dev/null
79389 +++ b/tools/gcc/Makefile
79390 @@ -0,0 +1,23 @@
79391 +#CC := gcc
79392 +#PLUGIN_SOURCE_FILES := pax_plugin.c
79393 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
79394 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
79395 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
79396 +
79397 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
79398 +
79399 +hostlibs-y := constify_plugin.so
79400 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
79401 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
79402 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
79403 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
79404 +hostlibs-y += colorize_plugin.so
79405 +
79406 +always := $(hostlibs-y)
79407 +
79408 +constify_plugin-objs := constify_plugin.o
79409 +stackleak_plugin-objs := stackleak_plugin.o
79410 +kallocstat_plugin-objs := kallocstat_plugin.o
79411 +kernexec_plugin-objs := kernexec_plugin.o
79412 +checker_plugin-objs := checker_plugin.o
79413 +colorize_plugin-objs := colorize_plugin.o
79414 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
79415 new file mode 100644
79416 index 0000000..d41b5af
79417 --- /dev/null
79418 +++ b/tools/gcc/checker_plugin.c
79419 @@ -0,0 +1,171 @@
79420 +/*
79421 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79422 + * Licensed under the GPL v2
79423 + *
79424 + * Note: the choice of the license means that the compilation process is
79425 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79426 + * but for the kernel it doesn't matter since it doesn't link against
79427 + * any of the gcc libraries
79428 + *
79429 + * gcc plugin to implement various sparse (source code checker) features
79430 + *
79431 + * TODO:
79432 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
79433 + *
79434 + * BUGS:
79435 + * - none known
79436 + */
79437 +#include "gcc-plugin.h"
79438 +#include "config.h"
79439 +#include "system.h"
79440 +#include "coretypes.h"
79441 +#include "tree.h"
79442 +#include "tree-pass.h"
79443 +#include "flags.h"
79444 +#include "intl.h"
79445 +#include "toplev.h"
79446 +#include "plugin.h"
79447 +//#include "expr.h" where are you...
79448 +#include "diagnostic.h"
79449 +#include "plugin-version.h"
79450 +#include "tm.h"
79451 +#include "function.h"
79452 +#include "basic-block.h"
79453 +#include "gimple.h"
79454 +#include "rtl.h"
79455 +#include "emit-rtl.h"
79456 +#include "tree-flow.h"
79457 +#include "target.h"
79458 +
79459 +extern void c_register_addr_space (const char *str, addr_space_t as);
79460 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
79461 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
79462 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
79463 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
79464 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
79465 +
79466 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79467 +extern rtx emit_move_insn(rtx x, rtx y);
79468 +
79469 +int plugin_is_GPL_compatible;
79470 +
79471 +static struct plugin_info checker_plugin_info = {
79472 + .version = "201111150100",
79473 +};
79474 +
79475 +#define ADDR_SPACE_KERNEL 0
79476 +#define ADDR_SPACE_FORCE_KERNEL 1
79477 +#define ADDR_SPACE_USER 2
79478 +#define ADDR_SPACE_FORCE_USER 3
79479 +#define ADDR_SPACE_IOMEM 0
79480 +#define ADDR_SPACE_FORCE_IOMEM 0
79481 +#define ADDR_SPACE_PERCPU 0
79482 +#define ADDR_SPACE_FORCE_PERCPU 0
79483 +#define ADDR_SPACE_RCU 0
79484 +#define ADDR_SPACE_FORCE_RCU 0
79485 +
79486 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
79487 +{
79488 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
79489 +}
79490 +
79491 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
79492 +{
79493 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
79494 +}
79495 +
79496 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
79497 +{
79498 + return default_addr_space_valid_pointer_mode(mode, as);
79499 +}
79500 +
79501 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
79502 +{
79503 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
79504 +}
79505 +
79506 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
79507 +{
79508 + return default_addr_space_legitimize_address(x, oldx, mode, as);
79509 +}
79510 +
79511 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
79512 +{
79513 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
79514 + return true;
79515 +
79516 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
79517 + return true;
79518 +
79519 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
79520 + return true;
79521 +
79522 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
79523 + return true;
79524 +
79525 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
79526 + return true;
79527 +
79528 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
79529 + return true;
79530 +
79531 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
79532 + return true;
79533 +
79534 + return subset == superset;
79535 +}
79536 +
79537 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
79538 +{
79539 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
79540 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
79541 +
79542 + return op;
79543 +}
79544 +
79545 +static void register_checker_address_spaces(void *event_data, void *data)
79546 +{
79547 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
79548 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
79549 + c_register_addr_space("__user", ADDR_SPACE_USER);
79550 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
79551 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
79552 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
79553 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
79554 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
79555 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
79556 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
79557 +
79558 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
79559 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
79560 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
79561 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
79562 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
79563 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
79564 + targetm.addr_space.convert = checker_addr_space_convert;
79565 +}
79566 +
79567 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79568 +{
79569 + const char * const plugin_name = plugin_info->base_name;
79570 + const int argc = plugin_info->argc;
79571 + const struct plugin_argument * const argv = plugin_info->argv;
79572 + int i;
79573 +
79574 + if (!plugin_default_version_check(version, &gcc_version)) {
79575 + error(G_("incompatible gcc/plugin versions"));
79576 + return 1;
79577 + }
79578 +
79579 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
79580 +
79581 + for (i = 0; i < argc; ++i)
79585 79582 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
79583 +
79584 + if (TARGET_64BIT == 0)
79585 + return 0;
79586 +
79587 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
79588 +
79589 + return 0;
79590 +}
79591 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
79592 new file mode 100644
79593 index 0000000..ee950d0
79594 --- /dev/null
79595 +++ b/tools/gcc/colorize_plugin.c
79596 @@ -0,0 +1,147 @@
79597 +/*
79598 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
79599 + * Licensed under the GPL v2
79600 + *
79601 + * Note: the choice of the license means that the compilation process is
79602 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79603 + * but for the kernel it doesn't matter since it doesn't link against
79604 + * any of the gcc libraries
79605 + *
79606 + * gcc plugin to colorize diagnostic output
79607 + *
79608 + */
79609 +
79610 +#include "gcc-plugin.h"
79611 +#include "config.h"
79612 +#include "system.h"
79613 +#include "coretypes.h"
79614 +#include "tree.h"
79615 +#include "tree-pass.h"
79616 +#include "flags.h"
79617 +#include "intl.h"
79618 +#include "toplev.h"
79619 +#include "plugin.h"
79620 +#include "diagnostic.h"
79621 +#include "plugin-version.h"
79622 +#include "tm.h"
79623 +
79624 +int plugin_is_GPL_compatible;
79625 +
79626 +static struct plugin_info colorize_plugin_info = {
79627 + .version = "201203092200",
79628 +};
79629 +
79630 +#define GREEN "\033[32m\033[2m"
79631 +#define LIGHTGREEN "\033[32m\033[1m"
79632 +#define YELLOW "\033[33m\033[2m"
79633 +#define LIGHTYELLOW "\033[33m\033[1m"
79634 +#define RED "\033[31m\033[2m"
79635 +#define LIGHTRED "\033[31m\033[1m"
79636 +#define BLUE "\033[34m\033[2m"
79637 +#define LIGHTBLUE "\033[34m\033[1m"
79638 +#define BRIGHT "\033[m\033[1m"
79639 +#define NORMAL "\033[m"
79640 +
79641 +static diagnostic_starter_fn old_starter;
79642 +static diagnostic_finalizer_fn old_finalizer;
79643 +
79644 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79645 +{
79646 + const char *color;
79647 + char *newprefix;
79648 +
79649 + switch (diagnostic->kind) {
79650 + case DK_NOTE:
79651 + color = LIGHTBLUE;
79652 + break;
79653 +
79654 + case DK_PEDWARN:
79655 + case DK_WARNING:
79656 + color = LIGHTYELLOW;
79657 + break;
79658 +
79659 + case DK_ERROR:
79660 + case DK_FATAL:
79661 + case DK_ICE:
79662 + case DK_PERMERROR:
79663 + case DK_SORRY:
79664 + color = LIGHTRED;
79665 + break;
79666 +
79667 + default:
79668 + color = NORMAL;
79669 + }
79670 +
79671 + old_starter(context, diagnostic);
79672 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
79673 + return;
79674 + pp_destroy_prefix(context->printer);
79675 + pp_set_prefix(context->printer, newprefix);
79676 +}
79677 +
79678 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
79679 +{
79680 + old_finalizer(context, diagnostic);
79681 +}
79682 +
79683 +static void colorize_arm(void)
79684 +{
79685 + old_starter = diagnostic_starter(global_dc);
79686 + old_finalizer = diagnostic_finalizer(global_dc);
79687 +
79688 + diagnostic_starter(global_dc) = start_colorize;
79689 + diagnostic_finalizer(global_dc) = finalize_colorize;
79690 +}
79691 +
79692 +static unsigned int execute_colorize_rearm(void)
79693 +{
79694 + if (diagnostic_starter(global_dc) == start_colorize)
79695 + return 0;
79696 +
79697 + colorize_arm();
79698 + return 0;
79699 +}
79700 +
79701 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
79702 + .pass = {
79703 + .type = SIMPLE_IPA_PASS,
79704 + .name = "colorize_rearm",
79705 + .gate = NULL,
79706 + .execute = execute_colorize_rearm,
79707 + .sub = NULL,
79708 + .next = NULL,
79709 + .static_pass_number = 0,
79710 + .tv_id = TV_NONE,
79711 + .properties_required = 0,
79712 + .properties_provided = 0,
79713 + .properties_destroyed = 0,
79714 + .todo_flags_start = 0,
79715 + .todo_flags_finish = 0
79716 + }
79717 +};
79718 +
79719 +static void colorize_start_unit(void *gcc_data, void *user_data)
79720 +{
79721 + colorize_arm();
79722 +}
79723 +
79724 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
79725 +{
79726 + const char * const plugin_name = plugin_info->base_name;
79727 + struct register_pass_info colorize_rearm_pass_info = {
79728 + .pass = &pass_ipa_colorize_rearm.pass,
79729 + .reference_pass_name = "*free_lang_data",
79730 + .ref_pass_instance_number = 0,
79731 + .pos_op = PASS_POS_INSERT_AFTER
79732 + };
79733 +
79734 + if (!plugin_default_version_check(version, &gcc_version)) {
79735 + error(G_("incompatible gcc/plugin versions"));
79736 + return 1;
79737 + }
79738 +
79739 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
79740 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
79741 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
79742 + return 0;
79743 +}
79744 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
79745 new file mode 100644
79746 index 0000000..704a564
79747 --- /dev/null
79748 +++ b/tools/gcc/constify_plugin.c
79749 @@ -0,0 +1,303 @@
79750 +/*
79751 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
79752 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
79753 + * Licensed under the GPL v2, or (at your option) v3
79754 + *
79755 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
79756 + *
79757 + * Homepage:
79758 + * http://www.grsecurity.net/~ephox/const_plugin/
79759 + *
79760 + * Usage:
79761 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
79762 + * $ gcc -fplugin=constify_plugin.so test.c -O2
79763 + */
79764 +
79765 +#include "gcc-plugin.h"
79766 +#include "config.h"
79767 +#include "system.h"
79768 +#include "coretypes.h"
79769 +#include "tree.h"
79770 +#include "tree-pass.h"
79771 +#include "flags.h"
79772 +#include "intl.h"
79773 +#include "toplev.h"
79774 +#include "plugin.h"
79775 +#include "diagnostic.h"
79776 +#include "plugin-version.h"
79777 +#include "tm.h"
79778 +#include "function.h"
79779 +#include "basic-block.h"
79780 +#include "gimple.h"
79781 +#include "rtl.h"
79782 +#include "emit-rtl.h"
79783 +#include "tree-flow.h"
79784 +
79785 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
79786 +
79787 +int plugin_is_GPL_compatible;
79788 +
79789 +static struct plugin_info const_plugin_info = {
79790 + .version = "201111150100",
79791 + .help = "no-constify\tturn off constification\n",
79792 +};
79793 +
79794 +static void constify_type(tree type);
79795 +static bool walk_struct(tree node);
79796 +
79797 +static tree deconstify_type(tree old_type)
79798 +{
79799 + tree new_type, field;
79800 +
79801 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
79802 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
79803 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
79804 + DECL_FIELD_CONTEXT(field) = new_type;
79805 + TYPE_READONLY(new_type) = 0;
79806 + C_TYPE_FIELDS_READONLY(new_type) = 0;
79807 + return new_type;
79808 +}
79809 +
79810 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79811 +{
79812 + tree type;
79813 +
79814 + *no_add_attrs = true;
79815 + if (TREE_CODE(*node) == FUNCTION_DECL) {
79816 + error("%qE attribute does not apply to functions", name);
79817 + return NULL_TREE;
79818 + }
79819 +
79820 + if (TREE_CODE(*node) == VAR_DECL) {
79821 + error("%qE attribute does not apply to variables", name);
79822 + return NULL_TREE;
79823 + }
79824 +
79825 + if (TYPE_P(*node)) {
79826 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
79827 + *no_add_attrs = false;
79828 + else
79829 + error("%qE attribute applies to struct and union types only", name);
79830 + return NULL_TREE;
79831 + }
79832 +
79833 + type = TREE_TYPE(*node);
79834 +
79835 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
79836 + error("%qE attribute applies to struct and union types only", name);
79837 + return NULL_TREE;
79838 + }
79839 +
79840 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
79841 + error("%qE attribute is already applied to the type", name);
79842 + return NULL_TREE;
79843 + }
79844 +
79845 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
79846 + error("%qE attribute used on type that is not constified", name);
79847 + return NULL_TREE;
79848 + }
79849 +
79850 + if (TREE_CODE(*node) == TYPE_DECL) {
79851 + TREE_TYPE(*node) = deconstify_type(type);
79852 + TREE_READONLY(*node) = 0;
79853 + return NULL_TREE;
79854 + }
79855 +
79856 + return NULL_TREE;
79857 +}
79858 +
79859 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
79860 +{
79861 + *no_add_attrs = true;
79862 + if (!TYPE_P(*node)) {
79863 + error("%qE attribute applies to types only", name);
79864 + return NULL_TREE;
79865 + }
79866 +
79867 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
79868 + error("%qE attribute applies to struct and union types only", name);
79869 + return NULL_TREE;
79870 + }
79871 +
79872 + *no_add_attrs = false;
79873 + constify_type(*node);
79874 + return NULL_TREE;
79875 +}
79876 +
79877 +static struct attribute_spec no_const_attr = {
79878 + .name = "no_const",
79879 + .min_length = 0,
79880 + .max_length = 0,
79881 + .decl_required = false,
79882 + .type_required = false,
79883 + .function_type_required = false,
79884 + .handler = handle_no_const_attribute,
79885 +#if BUILDING_GCC_VERSION >= 4007
79886 + .affects_type_identity = true
79887 +#endif
79888 +};
79889 +
79890 +static struct attribute_spec do_const_attr = {
79891 + .name = "do_const",
79892 + .min_length = 0,
79893 + .max_length = 0,
79894 + .decl_required = false,
79895 + .type_required = false,
79896 + .function_type_required = false,
79897 + .handler = handle_do_const_attribute,
79898 +#if BUILDING_GCC_VERSION >= 4007
79899 + .affects_type_identity = true
79900 +#endif
79901 +};
79902 +
79903 +static void register_attributes(void *event_data, void *data)
79904 +{
79905 + register_attribute(&no_const_attr);
79906 + register_attribute(&do_const_attr);
79907 +}
79908 +
79909 +static void constify_type(tree type)
79910 +{
79911 + TYPE_READONLY(type) = 1;
79912 + C_TYPE_FIELDS_READONLY(type) = 1;
79913 +}
79914 +
79915 +static bool is_fptr(tree field)
79916 +{
79917 + tree ptr = TREE_TYPE(field);
79918 +
79919 + if (TREE_CODE(ptr) != POINTER_TYPE)
79920 + return false;
79921 +
79922 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
79923 +}
79924 +
79925 +static bool walk_struct(tree node)
79926 +{
79927 + tree field;
79928 +
79929 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
79930 + return false;
79931 +
79932 + if (TYPE_FIELDS(node) == NULL_TREE)
79933 + return false;
79934 +
79935 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
79936 + tree type = TREE_TYPE(field);
79937 + enum tree_code code = TREE_CODE(type);
79938 + if (code == RECORD_TYPE || code == UNION_TYPE) {
79939 + if (!(walk_struct(type)))
79940 + return false;
79941 + } else if (!is_fptr(field) && !TREE_READONLY(field))
79942 + return false;
79943 + }
79944 + return true;
79945 +}
79946 +
79947 +static void finish_type(void *event_data, void *data)
79948 +{
79949 + tree type = (tree)event_data;
79950 +
79951 + if (type == NULL_TREE)
79952 + return;
79953 +
79954 + if (TYPE_READONLY(type))
79955 + return;
79956 +
79957 + if (walk_struct(type))
79958 + constify_type(type);
79959 +}
79960 +
79961 +static unsigned int check_local_variables(void);
79962 +
79963 +struct gimple_opt_pass pass_local_variable = {
79964 + {
79965 + .type = GIMPLE_PASS,
79966 + .name = "check_local_variables",
79967 + .gate = NULL,
79968 + .execute = check_local_variables,
79969 + .sub = NULL,
79970 + .next = NULL,
79971 + .static_pass_number = 0,
79972 + .tv_id = TV_NONE,
79973 + .properties_required = 0,
79974 + .properties_provided = 0,
79975 + .properties_destroyed = 0,
79976 + .todo_flags_start = 0,
79977 + .todo_flags_finish = 0
79978 + }
79979 +};
79980 +
79981 +static unsigned int check_local_variables(void)
79982 +{
79983 + tree var;
79984 + referenced_var_iterator rvi;
79985 +
79986 +#if BUILDING_GCC_VERSION == 4005
79987 + FOR_EACH_REFERENCED_VAR(var, rvi) {
79988 +#else
79989 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
79990 +#endif
79991 + tree type = TREE_TYPE(var);
79992 +
79993 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
79994 + continue;
79995 +
79996 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
79997 + continue;
79998 +
79999 + if (!TYPE_READONLY(type))
80000 + continue;
80001 +
80002 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
80003 +// continue;
80004 +
80005 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
80006 +// continue;
80007 +
80008 + if (walk_struct(type)) {
80009 + error("constified variable %qE cannot be local", var);
80010 + return 1;
80011 + }
80012 + }
80013 + return 0;
80014 +}
80015 +
80016 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80017 +{
80018 + const char * const plugin_name = plugin_info->base_name;
80019 + const int argc = plugin_info->argc;
80020 + const struct plugin_argument * const argv = plugin_info->argv;
80021 + int i;
80022 + bool constify = true;
80023 +
80024 + struct register_pass_info local_variable_pass_info = {
80025 + .pass = &pass_local_variable.pass,
80026 + .reference_pass_name = "*referenced_vars",
80027 + .ref_pass_instance_number = 0,
80028 + .pos_op = PASS_POS_INSERT_AFTER
80029 + };
80030 +
80031 + if (!plugin_default_version_check(version, &gcc_version)) {
80032 + error(G_("incompatible gcc/plugin versions"));
80033 + return 1;
80034 + }
80035 +
80036 + for (i = 0; i < argc; ++i) {
80037 + if (!(strcmp(argv[i].key, "no-constify"))) {
80038 + constify = false;
80039 + continue;
80040 + }
80041 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80042 + }
80043 +
80044 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
80045 + if (constify) {
80046 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
80047 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
80048 + }
80049 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
80050 +
80051 + return 0;
80052 +}
80053 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
80054 new file mode 100644
80055 index 0000000..a5eabce
80056 --- /dev/null
80057 +++ b/tools/gcc/kallocstat_plugin.c
80058 @@ -0,0 +1,167 @@
80059 +/*
80060 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80061 + * Licensed under the GPL v2
80062 + *
80063 + * Note: the choice of the license means that the compilation process is
80064 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80065 + * but for the kernel it doesn't matter since it doesn't link against
80066 + * any of the gcc libraries
80067 + *
80068 + * gcc plugin to find the distribution of k*alloc sizes
80069 + *
80070 + * TODO:
80071 + *
80072 + * BUGS:
80073 + * - none known
80074 + */
80075 +#include "gcc-plugin.h"
80076 +#include "config.h"
80077 +#include "system.h"
80078 +#include "coretypes.h"
80079 +#include "tree.h"
80080 +#include "tree-pass.h"
80081 +#include "flags.h"
80082 +#include "intl.h"
80083 +#include "toplev.h"
80084 +#include "plugin.h"
80085 +//#include "expr.h" where are you...
80086 +#include "diagnostic.h"
80087 +#include "plugin-version.h"
80088 +#include "tm.h"
80089 +#include "function.h"
80090 +#include "basic-block.h"
80091 +#include "gimple.h"
80092 +#include "rtl.h"
80093 +#include "emit-rtl.h"
80094 +
80095 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80096 +
80097 +int plugin_is_GPL_compatible;
80098 +
80099 +static const char * const kalloc_functions[] = {
80100 + "__kmalloc",
80101 + "kmalloc",
80102 + "kmalloc_large",
80103 + "kmalloc_node",
80104 + "kmalloc_order",
80105 + "kmalloc_order_trace",
80106 + "kmalloc_slab",
80107 + "kzalloc",
80108 + "kzalloc_node",
80109 +};
80110 +
80111 +static struct plugin_info kallocstat_plugin_info = {
80112 + .version = "201111150100",
80113 +};
80114 +
80115 +static unsigned int execute_kallocstat(void);
80116 +
80117 +static struct gimple_opt_pass kallocstat_pass = {
80118 + .pass = {
80119 + .type = GIMPLE_PASS,
80120 + .name = "kallocstat",
80121 + .gate = NULL,
80122 + .execute = execute_kallocstat,
80123 + .sub = NULL,
80124 + .next = NULL,
80125 + .static_pass_number = 0,
80126 + .tv_id = TV_NONE,
80127 + .properties_required = 0,
80128 + .properties_provided = 0,
80129 + .properties_destroyed = 0,
80130 + .todo_flags_start = 0,
80131 + .todo_flags_finish = 0
80132 + }
80133 +};
80134 +
80135 +static bool is_kalloc(const char *fnname)
80136 +{
80137 + size_t i;
80138 +
80139 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
80140 + if (!strcmp(fnname, kalloc_functions[i]))
80141 + return true;
80142 + return false;
80143 +}
80144 +
80145 +static unsigned int execute_kallocstat(void)
80146 +{
80147 + basic_block bb;
80148 +
80149 + // 1. loop through BBs and GIMPLE statements
80150 + FOR_EACH_BB(bb) {
80151 + gimple_stmt_iterator gsi;
80152 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80153 + // gimple match:
80154 + tree fndecl, size;
80155 + gimple call_stmt;
80156 + const char *fnname;
80157 +
80158 + // is it a call
80159 + call_stmt = gsi_stmt(gsi);
80160 + if (!is_gimple_call(call_stmt))
80161 + continue;
80162 + fndecl = gimple_call_fndecl(call_stmt);
80163 + if (fndecl == NULL_TREE)
80164 + continue;
80165 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
80166 + continue;
80167 +
80168 + // is it a call to k*alloc
80169 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
80170 + if (!is_kalloc(fnname))
80171 + continue;
80172 +
80173 + // is the size arg the result of a simple const assignment
80174 + size = gimple_call_arg(call_stmt, 0);
80175 + while (true) {
80176 + gimple def_stmt;
80177 + expanded_location xloc;
80178 + size_t size_val;
80179 +
80180 + if (TREE_CODE(size) != SSA_NAME)
80181 + break;
80182 + def_stmt = SSA_NAME_DEF_STMT(size);
80183 + if (!def_stmt || !is_gimple_assign(def_stmt))
80184 + break;
80185 + if (gimple_num_ops(def_stmt) != 2)
80186 + break;
80187 + size = gimple_assign_rhs1(def_stmt);
80188 + if (!TREE_CONSTANT(size))
80189 + continue;
80190 + xloc = expand_location(gimple_location(def_stmt));
80191 + if (!xloc.file)
80192 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
80193 + size_val = TREE_INT_CST_LOW(size);
80194 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
80195 + break;
80196 + }
80197 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80198 +//debug_tree(gimple_call_fn(call_stmt));
80199 +//print_node(stderr, "pax", fndecl, 4);
80200 + }
80201 + }
80202 +
80203 + return 0;
80204 +}
80205 +
80206 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80207 +{
80208 + const char * const plugin_name = plugin_info->base_name;
80209 + struct register_pass_info kallocstat_pass_info = {
80210 + .pass = &kallocstat_pass.pass,
80211 + .reference_pass_name = "ssa",
80212 + .ref_pass_instance_number = 0,
80213 + .pos_op = PASS_POS_INSERT_AFTER
80214 + };
80215 +
80216 + if (!plugin_default_version_check(version, &gcc_version)) {
80217 + error(G_("incompatible gcc/plugin versions"));
80218 + return 1;
80219 + }
80220 +
80221 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
80222 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
80223 +
80224 + return 0;
80225 +}
80226 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
80227 new file mode 100644
80228 index 0000000..008f159
80229 --- /dev/null
80230 +++ b/tools/gcc/kernexec_plugin.c
80231 @@ -0,0 +1,427 @@
80232 +/*
80233 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80234 + * Licensed under the GPL v2
80235 + *
80236 + * Note: the choice of the license means that the compilation process is
80237 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80238 + * but for the kernel it doesn't matter since it doesn't link against
80239 + * any of the gcc libraries
80240 + *
80241 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
80242 + *
80243 + * TODO:
80244 + *
80245 + * BUGS:
80246 + * - none known
80247 + */
80248 +#include "gcc-plugin.h"
80249 +#include "config.h"
80250 +#include "system.h"
80251 +#include "coretypes.h"
80252 +#include "tree.h"
80253 +#include "tree-pass.h"
80254 +#include "flags.h"
80255 +#include "intl.h"
80256 +#include "toplev.h"
80257 +#include "plugin.h"
80258 +//#include "expr.h" where are you...
80259 +#include "diagnostic.h"
80260 +#include "plugin-version.h"
80261 +#include "tm.h"
80262 +#include "function.h"
80263 +#include "basic-block.h"
80264 +#include "gimple.h"
80265 +#include "rtl.h"
80266 +#include "emit-rtl.h"
80267 +#include "tree-flow.h"
80268 +
80269 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80270 +extern rtx emit_move_insn(rtx x, rtx y);
80271 +
80272 +int plugin_is_GPL_compatible;
80273 +
80274 +static struct plugin_info kernexec_plugin_info = {
80275 + .version = "201111291120",
80276 + .help = "method=[bts|or]\tinstrumentation method\n"
80277 +};
80278 +
80279 +static unsigned int execute_kernexec_reload(void);
80280 +static unsigned int execute_kernexec_fptr(void);
80281 +static unsigned int execute_kernexec_retaddr(void);
80282 +static bool kernexec_cmodel_check(void);
80283 +
80284 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
80285 +static void (*kernexec_instrument_retaddr)(rtx);
80286 +
80287 +static struct gimple_opt_pass kernexec_reload_pass = {
80288 + .pass = {
80289 + .type = GIMPLE_PASS,
80290 + .name = "kernexec_reload",
80291 + .gate = kernexec_cmodel_check,
80292 + .execute = execute_kernexec_reload,
80293 + .sub = NULL,
80294 + .next = NULL,
80295 + .static_pass_number = 0,
80296 + .tv_id = TV_NONE,
80297 + .properties_required = 0,
80298 + .properties_provided = 0,
80299 + .properties_destroyed = 0,
80300 + .todo_flags_start = 0,
80301 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80302 + }
80303 +};
80304 +
80305 +static struct gimple_opt_pass kernexec_fptr_pass = {
80306 + .pass = {
80307 + .type = GIMPLE_PASS,
80308 + .name = "kernexec_fptr",
80309 + .gate = kernexec_cmodel_check,
80310 + .execute = execute_kernexec_fptr,
80311 + .sub = NULL,
80312 + .next = NULL,
80313 + .static_pass_number = 0,
80314 + .tv_id = TV_NONE,
80315 + .properties_required = 0,
80316 + .properties_provided = 0,
80317 + .properties_destroyed = 0,
80318 + .todo_flags_start = 0,
80319 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80320 + }
80321 +};
80322 +
80323 +static struct rtl_opt_pass kernexec_retaddr_pass = {
80324 + .pass = {
80325 + .type = RTL_PASS,
80326 + .name = "kernexec_retaddr",
80327 + .gate = kernexec_cmodel_check,
80328 + .execute = execute_kernexec_retaddr,
80329 + .sub = NULL,
80330 + .next = NULL,
80331 + .static_pass_number = 0,
80332 + .tv_id = TV_NONE,
80333 + .properties_required = 0,
80334 + .properties_provided = 0,
80335 + .properties_destroyed = 0,
80336 + .todo_flags_start = 0,
80337 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
80338 + }
80339 +};
80340 +
80341 +static bool kernexec_cmodel_check(void)
80342 +{
80343 + tree section;
80344 +
80345 + if (ix86_cmodel != CM_KERNEL)
80346 + return false;
80347 +
80348 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
80349 + if (!section || !TREE_VALUE(section))
80350 + return true;
80351 +
80352 + section = TREE_VALUE(TREE_VALUE(section));
80353 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
80354 + return true;
80355 +
80356 + return false;
80357 +}
80358 +
80359 +/*
80360 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
80361 + */
80362 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
80363 +{
80364 + gimple asm_movabs_stmt;
80365 +
80366 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
80367 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
80368 + gimple_asm_set_volatile(asm_movabs_stmt, true);
80369 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
80370 + update_stmt(asm_movabs_stmt);
80371 +}
80372 +
80373 +/*
80374 + * find all asm() stmts that clobber r10 and add a reload of r10
80375 + */
80376 +static unsigned int execute_kernexec_reload(void)
80377 +{
80378 + basic_block bb;
80379 +
80380 + // 1. loop through BBs and GIMPLE statements
80381 + FOR_EACH_BB(bb) {
80382 + gimple_stmt_iterator gsi;
80383 +
80384 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80385 + // gimple match: __asm__ ("" : : : "r10");
80386 + gimple asm_stmt;
80387 + size_t nclobbers;
80388 +
80389 + // is it an asm ...
80390 + asm_stmt = gsi_stmt(gsi);
80391 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
80392 + continue;
80393 +
80394 + // ... clobbering r10
80395 + nclobbers = gimple_asm_nclobbers(asm_stmt);
80396 + while (nclobbers--) {
80397 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
80398 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
80399 + continue;
80400 + kernexec_reload_fptr_mask(&gsi);
80401 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
80402 + break;
80403 + }
80404 + }
80405 + }
80406 +
80407 + return 0;
80408 +}
80409 +
80410 +/*
80411 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
80412 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
80413 + */
80414 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
80415 +{
80416 + gimple assign_intptr, assign_new_fptr, call_stmt;
80417 + tree intptr, old_fptr, new_fptr, kernexec_mask;
80418 +
80419 + call_stmt = gsi_stmt(*gsi);
80420 + old_fptr = gimple_call_fn(call_stmt);
80421 +
80422 + // create temporary unsigned long variable used for bitops and cast fptr to it
80423 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
80424 + add_referenced_var(intptr);
80425 + mark_sym_for_renaming(intptr);
80426 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
80427 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80428 + update_stmt(assign_intptr);
80429 +
80430 + // apply logical or to temporary unsigned long and bitmask
80431 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
80432 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
80433 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
80434 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
80435 + update_stmt(assign_intptr);
80436 +
80437 + // cast temporary unsigned long back to a temporary fptr variable
80438 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
80439 + add_referenced_var(new_fptr);
80440 + mark_sym_for_renaming(new_fptr);
80441 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
80442 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
80443 + update_stmt(assign_new_fptr);
80444 +
80445 + // replace call stmt fn with the new fptr
80446 + gimple_call_set_fn(call_stmt, new_fptr);
80447 + update_stmt(call_stmt);
80448 +}
80449 +
80450 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
80451 +{
80452 + gimple asm_or_stmt, call_stmt;
80453 + tree old_fptr, new_fptr, input, output;
80454 + VEC(tree, gc) *inputs = NULL;
80455 + VEC(tree, gc) *outputs = NULL;
80456 +
80457 + call_stmt = gsi_stmt(*gsi);
80458 + old_fptr = gimple_call_fn(call_stmt);
80459 +
80460 + // create temporary fptr variable
80461 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
80462 + add_referenced_var(new_fptr);
80463 + mark_sym_for_renaming(new_fptr);
80464 +
80465 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
80466 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
80467 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
80468 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
80469 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
80470 + VEC_safe_push(tree, gc, inputs, input);
80471 + VEC_safe_push(tree, gc, outputs, output);
80472 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
80473 + gimple_asm_set_volatile(asm_or_stmt, true);
80474 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
80475 + update_stmt(asm_or_stmt);
80476 +
80477 + // replace call stmt fn with the new fptr
80478 + gimple_call_set_fn(call_stmt, new_fptr);
80479 + update_stmt(call_stmt);
80480 +}
80481 +
80482 +/*
80483 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
80484 + */
80485 +static unsigned int execute_kernexec_fptr(void)
80486 +{
80487 + basic_block bb;
80488 +
80489 + // 1. loop through BBs and GIMPLE statements
80490 + FOR_EACH_BB(bb) {
80491 + gimple_stmt_iterator gsi;
80492 +
80493 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80494 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
80495 + tree fn;
80496 + gimple call_stmt;
80497 +
80498 + // is it a call ...
80499 + call_stmt = gsi_stmt(gsi);
80500 + if (!is_gimple_call(call_stmt))
80501 + continue;
80502 + fn = gimple_call_fn(call_stmt);
80503 + if (TREE_CODE(fn) == ADDR_EXPR)
80504 + continue;
80505 + if (TREE_CODE(fn) != SSA_NAME)
80506 + gcc_unreachable();
80507 +
80508 + // ... through a function pointer
80509 + fn = SSA_NAME_VAR(fn);
80510 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
80511 + continue;
80512 + fn = TREE_TYPE(fn);
80513 + if (TREE_CODE(fn) != POINTER_TYPE)
80514 + continue;
80515 + fn = TREE_TYPE(fn);
80516 + if (TREE_CODE(fn) != FUNCTION_TYPE)
80517 + continue;
80518 +
80519 + kernexec_instrument_fptr(&gsi);
80520 +
80521 +//debug_tree(gimple_call_fn(call_stmt));
80522 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80523 + }
80524 + }
80525 +
80526 + return 0;
80527 +}
80528 +
80529 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
80530 +static void kernexec_instrument_retaddr_bts(rtx insn)
80531 +{
80532 + rtx btsq;
80533 + rtvec argvec, constraintvec, labelvec;
80534 + int line;
80535 +
80536 + // create asm volatile("btsq $63,(%%rsp)":::)
80537 + argvec = rtvec_alloc(0);
80538 + constraintvec = rtvec_alloc(0);
80539 + labelvec = rtvec_alloc(0);
80540 + line = expand_location(RTL_LOCATION(insn)).line;
80541 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80542 + MEM_VOLATILE_P(btsq) = 1;
80543 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
80544 + emit_insn_before(btsq, insn);
80545 +}
80546 +
80547 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
80548 +static void kernexec_instrument_retaddr_or(rtx insn)
80549 +{
80550 + rtx orq;
80551 + rtvec argvec, constraintvec, labelvec;
80552 + int line;
80553 +
80554 + // create asm volatile("orq %%r10,(%%rsp)":::)
80555 + argvec = rtvec_alloc(0);
80556 + constraintvec = rtvec_alloc(0);
80557 + labelvec = rtvec_alloc(0);
80558 + line = expand_location(RTL_LOCATION(insn)).line;
80559 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80560 + MEM_VOLATILE_P(orq) = 1;
80561 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
80562 + emit_insn_before(orq, insn);
80563 +}
80564 +
80565 +/*
80566 + * find all asm level function returns and forcibly set the highest bit of the return address
80567 + */
80568 +static unsigned int execute_kernexec_retaddr(void)
80569 +{
80570 + rtx insn;
80571 +
80572 + // 1. find function returns
80573 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80574 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
80575 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
80576 + rtx body;
80577 +
80578 + // is it a retn
80579 + if (!JUMP_P(insn))
80580 + continue;
80581 + body = PATTERN(insn);
80582 + if (GET_CODE(body) == PARALLEL)
80583 + body = XVECEXP(body, 0, 0);
80584 + if (GET_CODE(body) != RETURN)
80585 + continue;
80586 + kernexec_instrument_retaddr(insn);
80587 + }
80588 +
80589 +// print_simple_rtl(stderr, get_insns());
80590 +// print_rtl(stderr, get_insns());
80591 +
80592 + return 0;
80593 +}
80594 +
80595 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80596 +{
80597 + const char * const plugin_name = plugin_info->base_name;
80598 + const int argc = plugin_info->argc;
80599 + const struct plugin_argument * const argv = plugin_info->argv;
80600 + int i;
80601 + struct register_pass_info kernexec_reload_pass_info = {
80602 + .pass = &kernexec_reload_pass.pass,
80603 + .reference_pass_name = "ssa",
80604 + .ref_pass_instance_number = 0,
80605 + .pos_op = PASS_POS_INSERT_AFTER
80606 + };
80607 + struct register_pass_info kernexec_fptr_pass_info = {
80608 + .pass = &kernexec_fptr_pass.pass,
80609 + .reference_pass_name = "ssa",
80610 + .ref_pass_instance_number = 0,
80611 + .pos_op = PASS_POS_INSERT_AFTER
80612 + };
80613 + struct register_pass_info kernexec_retaddr_pass_info = {
80614 + .pass = &kernexec_retaddr_pass.pass,
80615 + .reference_pass_name = "pro_and_epilogue",
80616 + .ref_pass_instance_number = 0,
80617 + .pos_op = PASS_POS_INSERT_AFTER
80618 + };
80619 +
80620 + if (!plugin_default_version_check(version, &gcc_version)) {
80621 + error(G_("incompatible gcc/plugin versions"));
80622 + return 1;
80623 + }
80624 +
80625 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
80626 +
80627 + if (TARGET_64BIT == 0)
80628 + return 0;
80629 +
80630 + for (i = 0; i < argc; ++i) {
80631 + if (!strcmp(argv[i].key, "method")) {
80632 + if (!argv[i].value) {
80633 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80634 + continue;
80635 + }
80636 + if (!strcmp(argv[i].value, "bts")) {
80637 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
80638 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
80639 + } else if (!strcmp(argv[i].value, "or")) {
80640 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
80641 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
80642 + fix_register("r10", 1, 1);
80643 + } else
80644 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80645 + continue;
80646 + }
80647 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80648 + }
80649 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
80650 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
80651 +
80652 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
80653 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
80654 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
80655 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
80656 +
80657 + return 0;
80658 +}
80659 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
80660 new file mode 100644
80661 index 0000000..4e82b16
80662 --- /dev/null
80663 +++ b/tools/gcc/stackleak_plugin.c
80664 @@ -0,0 +1,311 @@
80665 +/*
80666 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80667 + * Licensed under the GPL v2
80668 + *
80669 + * Note: the choice of the license means that the compilation process is
80670 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80671 + * but for the kernel it doesn't matter since it doesn't link against
80672 + * any of the gcc libraries
80673 + *
80674 + * gcc plugin to help implement various PaX features
80675 + *
80676 + * - track lowest stack pointer
80677 + *
80678 + * TODO:
80679 + * - initialize all local variables
80680 + *
80681 + * BUGS:
80682 + * - none known
80683 + */
80684 +#include "gcc-plugin.h"
80685 +#include "config.h"
80686 +#include "system.h"
80687 +#include "coretypes.h"
80688 +#include "tree.h"
80689 +#include "tree-pass.h"
80690 +#include "flags.h"
80691 +#include "intl.h"
80692 +#include "toplev.h"
80693 +#include "plugin.h"
80694 +//#include "expr.h" where are you...
80695 +#include "diagnostic.h"
80696 +#include "plugin-version.h"
80697 +#include "tm.h"
80698 +#include "function.h"
80699 +#include "basic-block.h"
80700 +#include "gimple.h"
80701 +#include "rtl.h"
80702 +#include "emit-rtl.h"
80703 +
80704 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80705 +
80706 +int plugin_is_GPL_compatible;
80707 +
80708 +static int track_frame_size = -1;
80709 +static const char track_function[] = "pax_track_stack";
80710 +static const char check_function[] = "pax_check_alloca";
80711 +static bool init_locals;
80712 +
80713 +static struct plugin_info stackleak_plugin_info = {
80714 + .version = "201203140940",
80715 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
80716 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
80717 +};
80718 +
80719 +static bool gate_stackleak_track_stack(void);
80720 +static unsigned int execute_stackleak_tree_instrument(void);
80721 +static unsigned int execute_stackleak_final(void);
80722 +
80723 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
80724 + .pass = {
80725 + .type = GIMPLE_PASS,
80726 + .name = "stackleak_tree_instrument",
80727 + .gate = gate_stackleak_track_stack,
80728 + .execute = execute_stackleak_tree_instrument,
80729 + .sub = NULL,
80730 + .next = NULL,
80731 + .static_pass_number = 0,
80732 + .tv_id = TV_NONE,
80733 + .properties_required = PROP_gimple_leh | PROP_cfg,
80734 + .properties_provided = 0,
80735 + .properties_destroyed = 0,
80736 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
80737 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
80738 + }
80739 +};
80740 +
80741 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
80742 + .pass = {
80743 + .type = RTL_PASS,
80744 + .name = "stackleak_final",
80745 + .gate = gate_stackleak_track_stack,
80746 + .execute = execute_stackleak_final,
80747 + .sub = NULL,
80748 + .next = NULL,
80749 + .static_pass_number = 0,
80750 + .tv_id = TV_NONE,
80751 + .properties_required = 0,
80752 + .properties_provided = 0,
80753 + .properties_destroyed = 0,
80754 + .todo_flags_start = 0,
80755 + .todo_flags_finish = TODO_dump_func
80756 + }
80757 +};
80758 +
80759 +static bool gate_stackleak_track_stack(void)
80760 +{
80761 + return track_frame_size >= 0;
80762 +}
80763 +
80764 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
80765 +{
80766 + gimple check_alloca;
80767 + tree fntype, fndecl, alloca_size;
80768 +
80769 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
80770 + fndecl = build_fn_decl(check_function, fntype);
80771 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
80772 +
80773 + // insert call to void pax_check_alloca(unsigned long size)
80774 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
80775 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
80776 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
80777 +}
80778 +
80779 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
80780 +{
80781 + gimple track_stack;
80782 + tree fntype, fndecl;
80783 +
80784 + fntype = build_function_type_list(void_type_node, NULL_TREE);
80785 + fndecl = build_fn_decl(track_function, fntype);
80786 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
80787 +
80788 + // insert call to void pax_track_stack(void)
80789 + track_stack = gimple_build_call(fndecl, 0);
80790 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
80791 +}
80792 +
80793 +#if BUILDING_GCC_VERSION == 4005
80794 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
80795 +{
80796 + tree fndecl;
80797 +
80798 + if (!is_gimple_call(stmt))
80799 + return false;
80800 + fndecl = gimple_call_fndecl(stmt);
80801 + if (!fndecl)
80802 + return false;
80803 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
80804 + return false;
80805 +// print_node(stderr, "pax", fndecl, 4);
80806 + return DECL_FUNCTION_CODE(fndecl) == code;
80807 +}
80808 +#endif
80809 +
80810 +static bool is_alloca(gimple stmt)
80811 +{
80812 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
80813 + return true;
80814 +
80815 +#if BUILDING_GCC_VERSION >= 4007
80816 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
80817 + return true;
80818 +#endif
80819 +
80820 + return false;
80821 +}
80822 +
80823 +static unsigned int execute_stackleak_tree_instrument(void)
80824 +{
80825 + basic_block bb, entry_bb;
80826 + bool prologue_instrumented = false, is_leaf = true;
80827 +
80828 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
80829 +
80830 + // 1. loop through BBs and GIMPLE statements
80831 + FOR_EACH_BB(bb) {
80832 + gimple_stmt_iterator gsi;
80833 +
80834 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80835 + gimple stmt;
80836 +
80837 + stmt = gsi_stmt(gsi);
80838 +
80839 + if (is_gimple_call(stmt))
80840 + is_leaf = false;
80841 +
80842 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
80843 + if (!is_alloca(stmt))
80844 + continue;
80845 +
80846 + // 2. insert stack overflow check before each __builtin_alloca call
80847 + stackleak_check_alloca(&gsi);
80848 +
80849 + // 3. insert track call after each __builtin_alloca call
80850 + stackleak_add_instrumentation(&gsi);
80851 + if (bb == entry_bb)
80852 + prologue_instrumented = true;
80853 + }
80854 + }
80855 +
80856 + // special case for some bad linux code: taking the address of static inline functions will materialize them
80857 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
80858 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
80859 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
80860 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
80861 + return 0;
80862 +
80863 + // 4. insert track call at the beginning
80864 + if (!prologue_instrumented) {
80865 + gimple_stmt_iterator gsi;
80866 +
80867 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
80868 + if (dom_info_available_p(CDI_DOMINATORS))
80869 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
80870 + gsi = gsi_start_bb(bb);
80871 + stackleak_add_instrumentation(&gsi);
80872 + }
80873 +
80874 + return 0;
80875 +}
80876 +
80877 +static unsigned int execute_stackleak_final(void)
80878 +{
80879 + rtx insn;
80880 +
80881 + if (cfun->calls_alloca)
80882 + return 0;
80883 +
80884 + // keep calls only if function frame is big enough
80885 + if (get_frame_size() >= track_frame_size)
80886 + return 0;
80887 +
80888 + // 1. find pax_track_stack calls
80889 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80890 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
80891 + rtx body;
80892 +
80893 + if (!CALL_P(insn))
80894 + continue;
80895 + body = PATTERN(insn);
80896 + if (GET_CODE(body) != CALL)
80897 + continue;
80898 + body = XEXP(body, 0);
80899 + if (GET_CODE(body) != MEM)
80900 + continue;
80901 + body = XEXP(body, 0);
80902 + if (GET_CODE(body) != SYMBOL_REF)
80903 + continue;
80904 + if (strcmp(XSTR(body, 0), track_function))
80905 + continue;
80906 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
80907 + // 2. delete call
80908 + insn = delete_insn_and_edges(insn);
80909 +#if BUILDING_GCC_VERSION >= 4007
80910 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
80911 + insn = delete_insn_and_edges(insn);
80912 +#endif
80913 + }
80914 +
80915 +// print_simple_rtl(stderr, get_insns());
80916 +// print_rtl(stderr, get_insns());
80917 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
80918 +
80919 + return 0;
80920 +}
80921 +
80922 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80923 +{
80924 + const char * const plugin_name = plugin_info->base_name;
80925 + const int argc = plugin_info->argc;
80926 + const struct plugin_argument * const argv = plugin_info->argv;
80927 + int i;
80928 + struct register_pass_info stackleak_tree_instrument_pass_info = {
80929 + .pass = &stackleak_tree_instrument_pass.pass,
80930 +// .reference_pass_name = "tree_profile",
80931 + .reference_pass_name = "optimized",
80932 + .ref_pass_instance_number = 0,
80933 + .pos_op = PASS_POS_INSERT_BEFORE
80934 + };
80935 + struct register_pass_info stackleak_final_pass_info = {
80936 + .pass = &stackleak_final_rtl_opt_pass.pass,
80937 + .reference_pass_name = "final",
80938 + .ref_pass_instance_number = 0,
80939 + .pos_op = PASS_POS_INSERT_BEFORE
80940 + };
80941 +
80942 + if (!plugin_default_version_check(version, &gcc_version)) {
80943 + error(G_("incompatible gcc/plugin versions"));
80944 + return 1;
80945 + }
80946 +
80947 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
80948 +
80949 + for (i = 0; i < argc; ++i) {
80950 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
80951 + if (!argv[i].value) {
80952 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80953 + continue;
80954 + }
80955 + track_frame_size = atoi(argv[i].value);
80956 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
80957 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80958 + continue;
80959 + }
80960 + if (!strcmp(argv[i].key, "initialize-locals")) {
80961 + if (argv[i].value) {
80962 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80963 + continue;
80964 + }
80965 + init_locals = true;
80966 + continue;
80967 + }
80968 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80969 + }
80970 +
80971 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
80972 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
80973 +
80974 + return 0;
80975 +}
80976 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
80977 index 6789d78..4afd019 100644
80978 --- a/tools/perf/util/include/asm/alternative-asm.h
80979 +++ b/tools/perf/util/include/asm/alternative-asm.h
80980 @@ -5,4 +5,7 @@
80981
80982 #define altinstruction_entry #
80983
80984 + .macro pax_force_retaddr rip=0, reload=0
80985 + .endm
80986 +
80987 #endif
80988 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
80989 index af0f22f..9a7d479 100644
80990 --- a/usr/gen_init_cpio.c
80991 +++ b/usr/gen_init_cpio.c
80992 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
80993 int retval;
80994 int rc = -1;
80995 int namesize;
80996 - int i;
80997 + unsigned int i;
80998
80999 mode |= S_IFREG;
81000
81001 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
81002 *env_var = *expanded = '\0';
81003 strncat(env_var, start + 2, end - start - 2);
81004 strncat(expanded, new_location, start - new_location);
81005 - strncat(expanded, getenv(env_var), PATH_MAX);
81006 - strncat(expanded, end + 1, PATH_MAX);
81007 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
81008 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
81009 strncpy(new_location, expanded, PATH_MAX);
81010 + new_location[PATH_MAX] = 0;
81011 } else
81012 break;
81013 }
81014 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
81015 index d9cfb78..4f27c10 100644
81016 --- a/virt/kvm/kvm_main.c
81017 +++ b/virt/kvm/kvm_main.c
81018 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
81019
81020 static cpumask_var_t cpus_hardware_enabled;
81021 static int kvm_usage_count = 0;
81022 -static atomic_t hardware_enable_failed;
81023 +static atomic_unchecked_t hardware_enable_failed;
81024
81025 struct kmem_cache *kvm_vcpu_cache;
81026 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
81027 @@ -2268,7 +2268,7 @@ static void hardware_enable_nolock(void *junk)
81028
81029 if (r) {
81030 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
81031 - atomic_inc(&hardware_enable_failed);
81032 + atomic_inc_unchecked(&hardware_enable_failed);
81033 printk(KERN_INFO "kvm: enabling virtualization on "
81034 "CPU%d failed\n", cpu);
81035 }
81036 @@ -2322,10 +2322,10 @@ static int hardware_enable_all(void)
81037
81038 kvm_usage_count++;
81039 if (kvm_usage_count == 1) {
81040 - atomic_set(&hardware_enable_failed, 0);
81041 + atomic_set_unchecked(&hardware_enable_failed, 0);
81042 on_each_cpu(hardware_enable_nolock, NULL, 1);
81043
81044 - if (atomic_read(&hardware_enable_failed)) {
81045 + if (atomic_read_unchecked(&hardware_enable_failed)) {
81046 hardware_disable_all_nolock();
81047 r = -EBUSY;
81048 }
81049 @@ -2676,7 +2676,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
81050 kvm_arch_vcpu_put(vcpu);
81051 }
81052
81053 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81054 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81055 struct module *module)
81056 {
81057 int r;
81058 @@ -2739,7 +2739,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81059 if (!vcpu_align)
81060 vcpu_align = __alignof__(struct kvm_vcpu);
81061 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
81062 - 0, NULL);
81063 + SLAB_USERCOPY, NULL);
81064 if (!kvm_vcpu_cache) {
81065 r = -ENOMEM;
81066 goto out_free_3;
81067 @@ -2749,9 +2749,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81068 if (r)
81069 goto out_free;
81070
81071 - kvm_chardev_ops.owner = module;
81072 - kvm_vm_fops.owner = module;
81073 - kvm_vcpu_fops.owner = module;
81074 + pax_open_kernel();
81075 + *(void **)&kvm_chardev_ops.owner = module;
81076 + *(void **)&kvm_vm_fops.owner = module;
81077 + *(void **)&kvm_vcpu_fops.owner = module;
81078 + pax_close_kernel();
81079
81080 r = misc_register(&kvm_dev);
81081 if (r) {